| prompt | completion | api |
|---|---|---|
| string, length 19 to 1.03M | string, length 4 to 2.12k | string, length 8 to 90 |
""" Provide the ``SoilProfile`` class.
"""
# -- Imports -----------------------------------------------------------------
from edafos.project import Project
from edafos.viz import ProfilePlot
from tabulate import tabulate
import numpy as np
import pandas as pd
# -- SoilProfile Class -------------------------------------------------------
class SoilProfile(Project):
""" Class to represent a new soil profile.
.. warning::
Pay attention to the base units for each unit system that you
choose to use. Refer to the parameter definition below or the
:ref:`input_units` page.
"""
# -- Constructor ---------------------------------------------------------
def __init__(self, unit_system, water_table, name=None):
"""
Args:
water_table (float): Depth to water table measured from ground
elevation.
- For **SI**: Enter value in **meters**.
- For **English**: Enter value in **feet**.
name (str): A name for the soil profile (default is None)
unit_system (str): The unit system for the project. Can only be
'English', or 'SI'. Properties inherited from the ``Project``
class.
"""
super().__init__(unit_system=unit_system)
# Set units for the water table
self.water_table = float(water_table) * self.set_units('length')
# A name for the soil profile object
self.name = name
# Initiate SPT data attribute
self.spt_data = None
# Call function to instantiate the soil profile data frame
self._create_profile()
# -- Soil Profile Instantiation Method (Private) -------------------------
def _create_profile(self):
""" A private method that instantiates the soil profile data frame.
Returns:
An empty Pandas DataFrame with two headers, one for the column
names and another for the column units.
"""
        # Careful when changing column names. Update API.get_soil_prop
name_list = ['Soil Type', 'Soil Desc', 'Depth', 'Height', 'TUW',
'Field N', 'Corr. N', 'Field Phi', 'Calc. Phi', 'Shear Su']
self.layers = pd.DataFrame(columns=name_list)
self.layers.index.name = 'Layer'
return self
# -- Method to add layers ------------------------------------------------
def add_layer(self, soil_type, height, **kwargs):
""" Method to add a new layer to the soil profile.
.. todo::
Run parameter checks for allowable ranges and required info, for
example, raise a warning for a cohesionless layer without shear
strength.
Args:
soil_type (str): Allowed values are 'cohesive' for clays and
'cohesionless' for sands.
height (float): Height of soil layer.
- For **SI**: Enter height in **meters**.
- For **English**: Enter height in **feet**.
Keyword Args:
soil_desc (str): Soil description. Initially created to accommodate
the :ref:`olson90-method`. As such, in order to follow the
guidelines in :numref:`Olson90_table`, the only valid inputs
are: ``gravel``, ``sand-gravel``, ``sand``, ``sand-silt``,
``silt``. TODO: There is no check to reject these inputs for
``soil_type = 'cohesive'``, although they have no effect.
tuw (float): Total unit weight of soil.
- For **SI**: Enter TUW in **kN/m**\ :sup:`3`.
- For **English**: Enter TUW in **lbf/ft**\ :sup:`3`.
field_n (int): Field SPT-N values.
corr_n (int): Corrected Field SPT-N values.
.. note::
If field SPT-N value is given without the corrected SPT-N,
the corrected value will be automatically calculated.
field_phi (float): Field internal angle of friction, *φ*,
in degrees.
calc_phi (float): Calculated internal angle of friction,
*φ*, from SPT-N values.
su (float): Undrained shear strength, *s*\ :sub:`u`.
- For **SI**: Enter *s*\ :sub:`u` in **kN/m**\ :sup:`2`.
- For **English**: Enter *s*\ :sub:`u` in **kip/ft**\ :sup:`2`.
"""
# Check for valid attributes
# If you update these keys, make sure to update API.get_soil_prop too
allowed_keys = ['soil_type', 'soil_desc', 'height', 'tuw', 'field_n',
'corr_n', 'field_phi', 'calc_phi', 'su']
for key in kwargs:
if key not in allowed_keys:
raise AttributeError("'{}' is not a valid attribute. The "
"allowed attributes are: {}"
"".format(key, allowed_keys))
# Assign values
soil_desc = kwargs.get('soil_desc', None)
tuw = kwargs.get('tuw', None)
field_n = kwargs.get('field_n', None)
corr_n = kwargs.get('corr_n', None)
field_phi = kwargs.get('field_phi', None)
calc_phi = kwargs.get('calc_phi', None)
su = kwargs.get('su', None)
# Check for soil type
if soil_type in ['cohesive', 'cohesionless']:
soil_type = soil_type
else:
raise ValueError("Soil type can only be 'cohesive' or "
"'cohesionless'.")
# Check for soil description
allowed_soil_desc = ['gravel', 'sand-gravel', 'sand', 'sand-silt',
'silt']
if (soil_desc is not None) and (soil_desc not in allowed_soil_desc):
raise ValueError("'{}' is not a valid soil description input.\n"
"Valid inputs are: {}."
"".format(soil_desc, allowed_soil_desc))
# Check that all inputs are positive numbers
for i in [height, tuw, field_n, corr_n, field_phi, calc_phi, su]:
if (i is not None) and (type(i) not in [int, float]):
raise TypeError("Value '{}' is of type {} and is not "
"permissible. \nEnter only positive numbers "
"(int or float) for soil properties."
"".format(i, type(i)))
elif (i is not None) and (i < 0):
raise ValueError("Value '{}' is not permissible. Enter positive"
" numbers only for soil properties.".format(i))
else:
pass
        # Calculate depth from layer heights
if len(self.layers) == 0:
depth = height
else:
depth = self.layers.loc[len(self.layers), 'Depth'] + height
# Store values in data frame
self.layers.loc[len(self.layers)+1] = [
soil_type, soil_desc, depth, height, tuw, field_n, corr_n,
field_phi, calc_phi, su]
# Reset index to start at 1
if self.layers.index[0] == 0:
self.layers.index = self.layers.index + 1
# Enforce proper data types
# TODO: Repeated every time a layer is added. Is there a better way?
self.layers.fillna(np.nan, inplace=True)
# for column in self.layers.columns.levels[0]:
for column in self.layers.columns:
if column not in ['Soil Type', 'Soil Desc']:
self.layers[column] = self.layers[column].astype(float)
return self
# -- Method that adds SPT-N data -----------------------------------------
def add_spt_data(self, data, from_csv=False):
""" Method that adds SPT-N values, either as a list (of lists) or
imported from a CSV file.
Args:
data (list): a list of lists for SPT-N data. The first list must
contain the depth values while the second list must contain
the N values.
from_csv (bool): Set to 'True' and specify the path to the CSV file.
Returns:
"""
# TODO: add CSV import functionality
# TODO: check SPT values, make them integers
# TODO: produce corrected SPT values
if from_csv:
pass
else:
            df = pd.DataFrame({'Depth': data[0], 'SPT-N': data[1]})  # api: pandas.DataFrame
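# --- Usage sketch (added for illustration; not part of the original module) ---
# A hedged example of driving the SoilProfile class above. Values are arbitrary
# English-unit inputs (feet, lbf/ft3, kip/ft2); it assumes the edafos dependencies
# imported at the top of this module are available.
if __name__ == '__main__':
    profile = SoilProfile(unit_system='English', water_table=10, name='Example boring')
    profile.add_layer(soil_type='cohesive', height=15, tuw=120, su=1.5)
    profile.add_layer(soil_type='cohesionless', height=20, tuw=125,
                      field_n=18, soil_desc='sand')
    print(profile.layers)  # 1-indexed DataFrame with Depth, Height, TUW, ... columns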
import datetime
import pandas as pd
from dateutil.relativedelta import relativedelta
from timeseries import slice_by_timestamp
from yearly import replace_year
def set_to_begin(values):
"""Set the dates and times in the list to the begin of the month
:param values:
:type values:
:return:
:rtype:
"""
return [pd.Timestamp(v).replace(day=1, hour=0, minute=0, second=0, microsecond=0) for v in values]
def set_to_end(values):
"""Set the dates and times in the list to the end of the month
:param values:
:type values:
:return:
:rtype:
"""
try:
return [pd.Timestamp(v).replace(day=last_day(v), hour=23, minute=59, second=59, microsecond=999999) for v in values]
except TypeError:
return pd.Timestamp(values).replace(day=last_day(values), hour=23, minute=59, second=59, microsecond=999999)
def last_day(dt):
    return (pd.Timestamp(dt) + pd.tseries.offsets.MonthEnd(n=0)).day  # api: pandas.tseries.offsets.MonthEnd (".day" added so replace(day=last_day(...)) above receives an int)
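# --- Usage sketch (added for illustration; not part of the original module) ---
# Quick demonstration of the month-boundary helpers defined above.
if __name__ == '__main__':
    stamps = ['2021-02-14 08:30:00', '2021-12-03 17:45:00']
    print(set_to_begin(stamps))    # -> 2021-02-01 00:00:00 and 2021-12-01 00:00:00
    print(set_to_end(stamps))      # -> 2021-02-28 23:59:59.999999 and 2021-12-31 23:59:59.999999
    print(last_day('2021-02-14'))  # -> 28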
# Copyright (c) 2013, GreyCube Technologies and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import cint
import pandas as pd
import numpy as np
def execute(filters=None):
return get_data(filters)
def get_data(filters):
columns = get_columns(filters)
data = frappe.db.sql(
"""
select
tja.name applicant, tja.applicant_name, tja.source,
tja.status, tjo.job_title, tjo.customer_cf,
concat_ws(' - ', round(tja.lower_range), round(tja.upper_range)) salary_range,
tja.applicant_total_experience_cf, tja.previous_company_cf
from `tabJob Applicant` tja
inner join `tabJob Opening` tjo on tjo.name = tja.job_title
""",
as_dict=True,
)
df1 = pd.DataFrame.from_records(data)
df1.set_index("applicant")
social_media = frappe.db.sql(
"""
select
tja.name applicant, tsmpu.social_media_platform,
coalesce(tsmpu.profile_url,"") profile_url
from `tabSocial Media Profile URL` tsmpu
inner join `tabJob Applicant` tja on tja.name = tsmpu.parent ;
""",
as_dict=True,
)
if not social_media:
return columns, df1.to_dict(orient="records")
df2 = pd.DataFrame.from_records(social_media)
df2 = pd.pivot_table(
df2,
index=["applicant"],
columns=["social_media_platform"],
values=["profile_url"],
aggfunc="first",
fill_value="",
)
df2.columns = [
frappe.scrub(d.replace("profile_url_", "")) for d in df2.columns.map("_".join)
]
    df3 = pd.merge(df1, df2, how="left", on=["applicant"])  # api: pandas.merge
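# --- Illustration (added): the pivot/flatten idiom above on toy data ---
# With a single "values" column, pd.pivot_table yields MultiIndex columns such as
# ('profile_url', 'LinkedIn'); columns.map("_".join) flattens them before the merge.
# (frappe.scrub is omitted here; a plain str.replace stands in for it; IDs are made up.)
def _pivot_demo():
    toy = pd.DataFrame({
        "applicant": ["HR-APP-0001", "HR-APP-0001", "HR-APP-0002"],
        "social_media_platform": ["LinkedIn", "GitHub", "LinkedIn"],
        "profile_url": ["https://linkedin.example/a", "https://github.example/a",
                        "https://linkedin.example/b"],
    })
    wide = pd.pivot_table(toy, index=["applicant"], columns=["social_media_platform"],
                          values=["profile_url"], aggfunc="first", fill_value="")
    wide.columns = [c.replace("profile_url_", "") for c in wide.columns.map("_".join)]
    return wide  # one row per applicant, one column per platform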
"""Ingest Pollard data."""
from pathlib import Path
import pandas as pd
from . import db
from . import util
from .util import log
DATASET_ID = 'pollard'
RAW_DIR = Path('data') / 'raw' / DATASET_ID
PLACE_CSV = RAW_DIR / 'Pollard_locations.csv'
DATA_CSV = RAW_DIR / 'pollardbase_example_201802.csv'
def ingest():
"""Ingest the data."""
raw_data = get_raw_data()
db.delete_dataset_records(DATASET_ID)
db.insert_dataset({
'dataset_id': DATASET_ID,
'title': 'Pollard lepidoptera observations',
'version': '2018-02',
'url': ''})
to_taxon_id = insert_taxa(raw_data)
to_place_id = insert_places(raw_data)
insert_events(raw_data, to_place_id)
insert_counts(raw_data, to_taxon_id)
def get_raw_data():
"""Read raw data."""
log(f'Getting {DATASET_ID} raw data')
raw_data = pd.read_csv(DATA_CSV, dtype='unicode')
util.normalize_columns_names(raw_data)
raw_data['started'] = pd.to_datetime(raw_data.Start_time, errors='coerce')
raw_data['sci_name'] = \
raw_data.Scientific_Name.str.split().str.join(' ')
raw_data['dataset_id'] = DATASET_ID
has_started = raw_data['started'].notna()
has_sci_name = raw_data['sci_name'].notna()
raw_data = raw_data.loc[has_started & has_sci_name, :].copy()
return raw_data
def insert_taxa(raw_data):
"""Insert taxa."""
log(f'Inserting {DATASET_ID} taxa')
cxn = db.connect()
firsts = raw_data['sci_name'].duplicated(keep='first')
taxa = raw_data.loc[~firsts, ['sci_name', 'Species']]
taxa.rename(columns={'Species': 'common_name'}, inplace=True)
taxa['genus'] = taxa['sci_name'].str.split().str[0]
taxa['class'] = 'lepidoptera'
taxa['group'] = None
taxa['order'] = None
taxa['family'] = None
taxa['target'] = None
taxa = db.drop_duplicate_taxa(taxa)
taxa['taxon_id'] = db.create_ids(taxa, 'taxa')
taxa['taxon_id'] = taxa['taxon_id'].astype(int)
taxa['taxon_json'] = '{}'
taxa.to_sql('taxa', cxn, if_exists='append', index=False)
sql = """SELECT sci_name, taxon_id
FROM taxa
WHERE "class" = 'lepidoptera'"""
return pd.read_sql(sql, cxn).set_index('sci_name').taxon_id.to_dict()
def insert_places(raw_data):
"""Insert places."""
log(f'Inserting {DATASET_ID} places')
    raw_places = pd.read_csv(PLACE_CSV, dtype='unicode')  # api: pandas.read_csv
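# --- Illustration (added): the cleaning idioms used in get_raw_data()/insert_taxa() ---
# pd.to_datetime(errors='coerce') turns unparseable values into NaT so rows can be
# filtered with .notna(); duplicated(keep='first') then keeps one row per sci_name.
def _cleaning_demo():
    toy = pd.DataFrame({
        'Start_time': ['2018-02-03 10:00', '2018-02-03 12:00', 'not a date', '2018-02-04 11:00'],
        'sci_name': ['Danaus plexippus', 'Danaus plexippus', 'Vanessa cardui', 'Vanessa cardui'],
    })
    toy['started'] = pd.to_datetime(toy.Start_time, errors='coerce')
    toy = toy.loc[toy['started'].notna()].copy()     # drops the unparseable row
    firsts = toy['sci_name'].duplicated(keep='first')
    return toy.loc[~firsts, 'sci_name'].tolist()     # ['Danaus plexippus', 'Vanessa cardui']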
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2021 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Unit tests for the utilities within the `calibration.dataframe_utilities`
module.
"""
import unittest
import iris
import numpy as np
import pandas as pd
import pytest
from improver.calibration.dataframe_utilities import (
forecast_and_truth_dataframes_to_cubes,
forecast_dataframe_to_cube,
truth_dataframe_to_cube,
)
from improver.metadata.constants.time_types import TIME_COORDS
from improver.spotdata.build_spotdata_cube import build_spotdata_cube
from improver_tests import ImproverTest
def _chunker(seq, size):
"""Helper function to iterate through a sequence in chunks.
Args:
seq:
The sequence to be chunked.
size:
The size of the chunks.
Return:
A sequence split into chunks.
"""
return (seq[pos : pos + size] for pos in range(0, len(seq), size))
class SetupSharedDataFrames(ImproverTest):
"""A shared dataframe creation class."""
def setUp(self):
"""Set-up forecast and truth dataframes."""
pytest.importorskip("pandas")
data = np.array(
[5.2, 0.3, 20.4, 6.5, 3.1, 21.5, 7.2, 4.2, 24.3], dtype=np.float32
)
self.forecast_data = np.tile(data, 3)
self.frt1 = pd.Timestamp("2017-07-20T12:00:00", tz="UTC")
self.frt2 = pd.Timestamp("2017-07-21T12:00:00", tz="UTC")
self.frt3 = pd.Timestamp("2017-07-22T12:00:00", tz="UTC")
self.fp = pd.Timedelta(6 * 3600, unit="s")
self.time1 = pd.Timestamp("2017-07-20T18:00:00", tz="UTC")
self.time2 = pd.Timestamp("2017-07-21T18:00:00", tz="UTC")
self.time3 = pd.Timestamp("2017-07-22T18:00:00", tz="UTC")
self.wmo_ids = ["03002", "03003", "03004"]
self.percentiles = np.array([25.0, 50.0, 75.0], dtype=np.float32)
diag = "air_temperature"
self.cf_name = "air_temperature"
self.latitudes = np.array([50.0, 60.0, 70.0], dtype=np.float32)
self.longitudes = np.array([-10.0, 0.0, 10.0], dtype=np.float32)
self.altitudes = np.array([10.0, 20.0, 30.0], dtype=np.float32)
self.period = pd.Timedelta(1, unit="h")
self.height = np.array([1.5], dtype=np.float32)
self.units = "Celsius"
df_dict = {
"forecast": self.forecast_data,
"blend_time": np.repeat([self.frt1, self.frt2, self.frt3], 9),
"forecast_period": np.repeat(self.fp, 27),
"forecast_reference_time": np.repeat([self.frt1, self.frt2, self.frt3], 9),
"time": np.repeat([self.time1, self.time2, self.time3], 9),
"wmo_id": np.tile(self.wmo_ids, 9),
"percentile": np.tile(np.repeat(self.percentiles, 3), 3),
"diagnostic": [diag] * 27,
"latitude": np.tile(self.latitudes, 9),
"longitude": np.tile(self.longitudes, 9),
"altitude": np.tile(self.altitudes, 9),
"period": [self.period] * 27,
"height": np.tile(self.height, 27),
"cf_name": [self.cf_name] * 27,
"units": [self.units] * 27,
}
self.forecast_df = pd.DataFrame(df_dict)
data = np.array([6.8, 2.7, 21.2], dtype=np.float32)
self.truth_data = np.tile(data, 3)
df_dict = {
"ob_value": self.truth_data,
"time": np.repeat([self.time1, self.time2, self.time3], 3),
"wmo_id": self.wmo_ids * 3,
"diagnostic": [diag] * 9,
"latitude": np.tile(self.latitudes, 3),
"longitude": np.tile(self.longitudes, 3),
"altitude": np.tile(self.altitudes, 3),
"period": [self.period] * 9,
"height": np.tile(self.height, 9),
"cf_name": [self.cf_name] * 9,
"units": [self.units] * 9,
}
        self.truth_df = pd.DataFrame(df_dict)  # api: pandas.DataFrame
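# --- Illustration (added, not part of the original tests) ---
# The _chunker helper above simply yields successive slices, e.g.:
#     list(_chunker([1, 2, 3, 4, 5], 2)) == [[1, 2], [3, 4], [5]]
#     list(_chunker("abcdef", 3)) == ["abc", "def"]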
# -*- coding: utf-8 -*-
"""
This program automatically extracts and analyses data from a database of AIDS Data.
Data is from:
US Department of Health and Human Services (US DHHS), Centers for Disease Control and Prevention (CDC), National Center for HIV, STD and TB Prevention (NCHSTP), AIDS Public Information Data Set (APIDS) US Surveillance Data for 1981-2002, CDC WONDER On-line Database, December 2005. Accessed at http://wonder.cdc.gov/aids-v2002.html on Mar 9, 2017 2:26:39 PM"
Program was written for analysis of the database and is provided as is.
"""
import pypyodbc
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from AIDSAnalysisProcedures import CreateDataGrid, contourplotVitalAge,contourplotVital,contourplotHIVExpByAgeLogNorm,surface3dAIDSByAgeGroup,contourplotAIDSByAgeGroup,contourplotAIDSByAgeGroupLogNorm,contourplotHIVExpByYear,contourplotHIVExpByYearLogNorm,contourplotHIVExpByAge
plt.close()
#using pypyodbc
#Connect to database (enter your own driver and Database path) and generate cursor
conn_str = r'DRIVER={};DBQ=;'
cnxn = pypyodbc.connect(conn_str)
crsr = cnxn.cursor()
#Extract Table Names
Table_details = []
for row in crsr.columns():
if 'MSys' not in row[2]: #ignore access default databases/tables
Table_details.append((row[2],row[3]))
np_tabledetails=np.array(Table_details)
Table_names = np.unique(np_tabledetails[:,0])
#This code currently assumes the first table in the database
TableChoice = Table_names[0]
#Extract all table column headings
Column_names = np_tabledetails[np_tabledetails[:,0]==TableChoice,1]
#Extract all the unique column entries and their frequency into a dataframe and save
df_BigCount=pd.DataFrame()
for name in Column_names[1:]:
#find all the unique values in the column, including nulls
sql = 'SELECT ' + str(name) + ', COUNT(*) FROM ' + str(TableChoice) + ' AS COUNT GROUP BY ' + str(name)
BigCount = crsr.execute(sql).fetchall()
df_interBigCount=pd.DataFrame(BigCount)
df_interBigCount['Column']=str(name)
    df_BigCount = pd.concat([df_BigCount, df_interBigCount])  # api: pandas.concat
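# --- Illustration (added): a pure-pandas equivalent of the SQL COUNT/GROUP BY loop ---
# If a table were already loaded into a DataFrame, the same unique-value/frequency
# summary could be built with value_counts(dropna=False); toy data shown below.
def _value_count_demo():
    toy = pd.DataFrame({'Age_Group': ['30-34', '30-34', '35-39', None],
                        'Vital_Status': ['Alive', 'Dead', 'Alive', 'Alive']})
    pieces = []
    for col in toy.columns:
        counts = toy[col].value_counts(dropna=False).rename_axis('value').reset_index(name='count')
        counts['Column'] = col
        pieces.append(counts)
    return pd.concat(pieces, ignore_index=True)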
import numpy as np
import pandas as pd
import altair as alt
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# Plot a 3D surface
def Vis3d(X,Y,Z):
fig = plt.figure(figsize=(8,8))
ax = fig.add_subplot(111, projection='3d')
ax.plot_surface(X, Y, Z, color='y')
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
plt.show()
# Visualise the metrics from the model
def MetricsVis(history):
df = pd.DataFrame(history)
df.reset_index()
df["batch"] = df.index + 1
df = df.melt("batch", var_name="name")
df["val"] = df.name.str.startswith("val")
df["type"] = df["val"]
df["metrics"] = df["val"]
df.loc[df.val == False, "type"] = "training"
df.loc[df.val == True, "type"] = "validation"
df.loc[df.val == False, "metrics"] = df.name
df.loc[df.val == True, "metrics"] = df.name.str.split("val_", expand=True)[1]
df = df.drop(["name", "val"], axis=1)
base = alt.Chart().encode(
x = "batch:Q",
y = "value:Q",
color = "type"
).properties(width = 300, height = 300)
layers = base.mark_circle(size = 50).encode(tooltip = ["batch", "value"]) + base.mark_line()
chart = layers.facet(column='metrics:N', data=df).resolve_scale(y='independent')
return chart
def InteractionVis(df):
vis = alt.Chart(df).mark_rect().encode(
alt.X(field="ITEM", type="nominal",
axis=alt.Axis(orient="top", labelAngle=0)),
alt.Y(field="USER", type="nominal",
axis=alt.Axis(orient="left")),
alt.Color(field="RATING", type="quantitative",
scale=alt.Scale(type="bin-ordinal", scheme='yellowgreenblue', nice=True),
legend=alt.Legend(titleOrient='top', orient="bottom",
direction= "horizontal", tickCount=5))
).properties(
width= 180,
height=300
).configure_axis(
grid=False
)
return vis
def TrainTestVis(train, test):
df = pd.concat([train, test])
maptt = {0: "train", 1: "test"}
df["SPLIT"] = df.split_index.apply(lambda x: maptt[x])
df.head()
vis = alt.Chart(df).mark_rect().encode(
alt.X(field="ITEM", type="nominal",
axis=alt.Axis(orient="top", labelAngle=0)),
alt.Y(field="USER", type="nominal",
axis=alt.Axis(orient="left")),
alt.Color(field="SPLIT", type="ordinal",
scale=alt.Scale(type="ordinal", scheme="darkred", nice=True),
legend=alt.Legend(titleOrient='top', orient="bottom",
direction= "horizontal", tickCount=5)),
alt.Opacity(value=1)
).properties(
width= 180,
height=300
).configure_axis(
grid=False
)
return vis
def EmbeddingVis(embedding, n_factors, name):
    embedding_df_wide = pd.DataFrame(embedding)  # api: pandas.DataFrame
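# --- Usage sketch (added): feeding MetricsVis a Keras-style history dict ---
# MetricsVis above expects metric name -> per-epoch values, with validation metrics
# prefixed by "val_" (the layout of keras History.history); numbers below are toy values.
def _metricsvis_demo():
    toy_history = {
        "loss":     [0.90, 0.55, 0.40, 0.33],
        "val_loss": [0.95, 0.60, 0.48, 0.45],
        "acc":      [0.60, 0.75, 0.82, 0.86],
        "val_acc":  [0.58, 0.72, 0.78, 0.80],
    }
    return MetricsVis(toy_history)  # an Altair chart faceted by metric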
from sklearn.ensemble import RandomForestRegressor
from sklearn.feature_selection import SelectFromModel
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import f_classif
from src.features.label_encoder import MultiColumnLabelEncoder
from sklearn.svm import LinearSVR
def Feature_selection(dataset):
cate_cols = ['galaxy']
dataset.X_train = MultiColumnLabelEncoder(columns=cate_cols).transform(dataset.X_train)
dataset.X_val = MultiColumnLabelEncoder(columns=cate_cols).transform(dataset.X_val)
dataset.X_test = MultiColumnLabelEncoder(columns=cate_cols).transform(dataset.X_test)
sel = RandomForestRegressor(n_estimators = 100).fit(dataset.X_train, dataset.y_train)
sel_feature=list(zip(list(dataset.X_train.columns), sel.feature_importances_))
model = SelectFromModel(sel, prefit=True)
set1= dataset.X_train.columns[(model.get_support())]
    # Feature selection using SelectKBest
model = SelectKBest(f_classif, k=20).fit(dataset.X_train, dataset.y_train)
set2=dataset.X_train.columns[(model.get_support())]
selected_columns=set(list(set1)+list(set2))
return selected_columns
def Feature_selection_new(ds):
# ds.X_train['y']=ds.y_train
# ds.X_test['y']=ds.y_test
galaxy_train=ds.X_train['galaxy']
galaxy_val=ds.X_val['galaxy']
galaxy_test=ds.X_test['galaxy']
ds.X_train=ds.X_train.drop(columns=['galaxy'])
ds.X_val=ds.X_val.drop(columns=['galaxy'])
# from featexp import get_trend_stats
# stats = get_trend_stats(data=ds.X_train, target_col='y', data_test=ds.X_test)
import pandas as pd
from numpy import loadtxt
from xgboost import XGBRegressor
from xgboost import plot_importance
from matplotlib import pyplot
from sklearn.metrics import mean_squared_error
from numpy import sort
from sklearn.feature_selection import SelectFromModel
from math import sqrt
# fit model no training data
model = XGBRegressor()
model.fit(ds.X_train, ds.y_train)
# plot feature importance
plot_importance(model)
pyplot.show()
# make predictions for test data and evaluate
y_pred = model.predict(ds.X_val)
rmse = sqrt(mean_squared_error(y_pred,ds.y_val))
print("mse: %.7f%%" % (rmse))
# Fit model using each importance as a threshold
thresholds = sort(model.feature_importances_)
columns=list(ds.X_train.columns)
importances=model.feature_importances_.tolist()
column_importances=list(zip(columns,importances))
    column_importances_df = pd.DataFrame(column_importances, columns=['columns', 'importances'])  # api: pandas.DataFrame
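    # --- Sketch (added): the per-threshold refit loop that the comment above describes ---
    # This block is an assumed, common XGBoost pattern rather than the original code:
    # refit on the features kept at each importance threshold and report validation RMSE.
    for thresh in thresholds:
        selection = SelectFromModel(model, threshold=thresh, prefit=True)
        select_X_train = selection.transform(ds.X_train)
        select_X_val = selection.transform(ds.X_val)
        selection_model = XGBRegressor()
        selection_model.fit(select_X_train, ds.y_train)
        thresh_rmse = sqrt(mean_squared_error(selection_model.predict(select_X_val), ds.y_val))
        print("thresh=%.5f, n=%d, rmse=%.5f" % (thresh, select_X_train.shape[1], thresh_rmse))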
from collections import OrderedDict
from datetime import datetime, timedelta
import numpy as np
import numpy.ma as ma
import pytest
from pandas._libs import iNaT, lib
from pandas.core.dtypes.common import is_categorical_dtype, is_datetime64tz_dtype
from pandas.core.dtypes.dtypes import (
CategoricalDtype,
DatetimeTZDtype,
IntervalDtype,
PeriodDtype,
)
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Index,
Interval,
IntervalIndex,
MultiIndex,
NaT,
Period,
Series,
Timestamp,
date_range,
isna,
period_range,
timedelta_range,
)
import pandas._testing as tm
from pandas.core.arrays import IntervalArray, period_array
class TestSeriesConstructors:
@pytest.mark.parametrize(
"constructor,check_index_type",
[
# NOTE: some overlap with test_constructor_empty but that test does not
# test for None or an empty generator.
# test_constructor_pass_none tests None but only with the index also
# passed.
(lambda: Series(), True),
(lambda: Series(None), True),
(lambda: Series({}), True),
(lambda: Series(()), False), # creates a RangeIndex
(lambda: Series([]), False), # creates a RangeIndex
(lambda: Series((_ for _ in [])), False), # creates a RangeIndex
(lambda: Series(data=None), True),
(lambda: Series(data={}), True),
(lambda: Series(data=()), False), # creates a RangeIndex
(lambda: Series(data=[]), False), # creates a RangeIndex
(lambda: Series(data=(_ for _ in [])), False), # creates a RangeIndex
],
)
def test_empty_constructor(self, constructor, check_index_type):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
expected = Series()
result = constructor()
assert len(result.index) == 0
tm.assert_series_equal(result, expected, check_index_type=check_index_type)
def test_invalid_dtype(self):
# GH15520
msg = "not understood"
invalid_list = [pd.Timestamp, "pd.Timestamp", list]
for dtype in invalid_list:
with pytest.raises(TypeError, match=msg):
Series([], name="time", dtype=dtype)
def test_invalid_compound_dtype(self):
# GH#13296
c_dtype = np.dtype([("a", "i8"), ("b", "f4")])
cdt_arr = np.array([(1, 0.4), (256, -13)], dtype=c_dtype)
with pytest.raises(ValueError, match="Use DataFrame instead"):
Series(cdt_arr, index=["A", "B"])
def test_scalar_conversion(self):
# Pass in scalar is disabled
scalar = Series(0.5)
assert not isinstance(scalar, float)
# Coercion
assert float(Series([1.0])) == 1.0
assert int(Series([1.0])) == 1
def test_constructor(self, datetime_series):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty_series = Series()
assert datetime_series.index.is_all_dates
# Pass in Series
derived = Series(datetime_series)
assert derived.index.is_all_dates
assert tm.equalContents(derived.index, datetime_series.index)
# Ensure new index is not created
assert id(datetime_series.index) == id(derived.index)
# Mixed type Series
mixed = Series(["hello", np.NaN], index=[0, 1])
assert mixed.dtype == np.object_
assert mixed[1] is np.NaN
assert not empty_series.index.is_all_dates
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
assert not Series().index.is_all_dates
# exception raised is of type Exception
with pytest.raises(Exception, match="Data must be 1-dimensional"):
Series(np.random.randn(3, 3), index=np.arange(3))
mixed.name = "Series"
rs = Series(mixed).name
xp = "Series"
assert rs == xp
# raise on MultiIndex GH4187
m = MultiIndex.from_arrays([[1, 2], [3, 4]])
msg = "initializing a Series from a MultiIndex is not supported"
with pytest.raises(NotImplementedError, match=msg):
Series(m)
@pytest.mark.parametrize("input_class", [list, dict, OrderedDict])
def test_constructor_empty(self, input_class):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty = Series()
empty2 = Series(input_class())
# these are Index() and RangeIndex() which don't compare type equal
# but are just .equals
tm.assert_series_equal(empty, empty2, check_index_type=False)
# With explicit dtype:
empty = Series(dtype="float64")
empty2 = Series(input_class(), dtype="float64")
tm.assert_series_equal(empty, empty2, check_index_type=False)
# GH 18515 : with dtype=category:
empty = Series(dtype="category")
empty2 = Series(input_class(), dtype="category")
tm.assert_series_equal(empty, empty2, check_index_type=False)
if input_class is not list:
# With index:
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty = Series(index=range(10))
empty2 = Series(input_class(), index=range(10))
tm.assert_series_equal(empty, empty2)
# With index and dtype float64:
empty = Series(np.nan, index=range(10))
empty2 = Series(input_class(), index=range(10), dtype="float64")
tm.assert_series_equal(empty, empty2)
# GH 19853 : with empty string, index and dtype str
empty = Series("", dtype=str, index=range(3))
empty2 = Series("", index=range(3))
tm.assert_series_equal(empty, empty2)
@pytest.mark.parametrize("input_arg", [np.nan, float("nan")])
def test_constructor_nan(self, input_arg):
empty = Series(dtype="float64", index=range(10))
empty2 = Series(input_arg, index=range(10))
tm.assert_series_equal(empty, empty2, check_index_type=False)
@pytest.mark.parametrize(
"dtype",
["f8", "i8", "M8[ns]", "m8[ns]", "category", "object", "datetime64[ns, UTC]"],
)
@pytest.mark.parametrize("index", [None, pd.Index([])])
def test_constructor_dtype_only(self, dtype, index):
# GH-20865
result = pd.Series(dtype=dtype, index=index)
assert result.dtype == dtype
assert len(result) == 0
def test_constructor_no_data_index_order(self):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
result = pd.Series(index=["b", "a", "c"])
assert result.index.tolist() == ["b", "a", "c"]
def test_constructor_no_data_string_type(self):
# GH 22477
result = pd.Series(index=[1], dtype=str)
assert np.isnan(result.iloc[0])
@pytest.mark.parametrize("item", ["entry", "ѐ", 13])
def test_constructor_string_element_string_type(self, item):
# GH 22477
result = pd.Series(item, index=[1], dtype=str)
assert result.iloc[0] == str(item)
def test_constructor_dtype_str_na_values(self, string_dtype):
# https://github.com/pandas-dev/pandas/issues/21083
ser = Series(["x", None], dtype=string_dtype)
result = ser.isna()
expected = Series([False, True])
tm.assert_series_equal(result, expected)
assert ser.iloc[1] is None
ser = Series(["x", np.nan], dtype=string_dtype)
assert np.isnan(ser.iloc[1])
def test_constructor_series(self):
index1 = ["d", "b", "a", "c"]
index2 = sorted(index1)
s1 = Series([4, 7, -5, 3], index=index1)
s2 = Series(s1, index=index2)
tm.assert_series_equal(s2, s1.sort_index())
def test_constructor_iterable(self):
# GH 21987
class Iter:
def __iter__(self):
for i in range(10):
yield i
expected = Series(list(range(10)), dtype="int64")
result = Series(Iter(), dtype="int64")
tm.assert_series_equal(result, expected)
def test_constructor_sequence(self):
# GH 21987
expected = Series(list(range(10)), dtype="int64")
result = Series(range(10), dtype="int64")
tm.assert_series_equal(result, expected)
def test_constructor_single_str(self):
# GH 21987
expected = Series(["abc"])
result = Series("abc")
tm.assert_series_equal(result, expected)
def test_constructor_list_like(self):
# make sure that we are coercing different
# list-likes to standard dtypes and not
# platform specific
expected = Series([1, 2, 3], dtype="int64")
for obj in [[1, 2, 3], (1, 2, 3), np.array([1, 2, 3], dtype="int64")]:
result = Series(obj, index=[0, 1, 2])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dtype", ["bool", "int32", "int64", "float64"])
def test_constructor_index_dtype(self, dtype):
# GH 17088
s = Series(Index([0, 2, 4]), dtype=dtype)
assert s.dtype == dtype
@pytest.mark.parametrize(
"input_vals",
[
([1, 2]),
(["1", "2"]),
(list(pd.date_range("1/1/2011", periods=2, freq="H"))),
(list(pd.date_range("1/1/2011", periods=2, freq="H", tz="US/Eastern"))),
([pd.Interval(left=0, right=5)]),
],
)
def test_constructor_list_str(self, input_vals, string_dtype):
# GH 16605
# Ensure that data elements from a list are converted to strings
# when dtype is str, 'str', or 'U'
result = Series(input_vals, dtype=string_dtype)
expected = Series(input_vals).astype(string_dtype)
tm.assert_series_equal(result, expected)
def test_constructor_list_str_na(self, string_dtype):
result = Series([1.0, 2.0, np.nan], dtype=string_dtype)
expected = Series(["1.0", "2.0", np.nan], dtype=object)
tm.assert_series_equal(result, expected)
assert np.isnan(result[2])
def test_constructor_generator(self):
gen = (i for i in range(10))
result = Series(gen)
exp = Series(range(10))
tm.assert_series_equal(result, exp)
gen = (i for i in range(10))
result = Series(gen, index=range(10, 20))
exp.index = range(10, 20)
tm.assert_series_equal(result, exp)
def test_constructor_map(self):
# GH8909
m = map(lambda x: x, range(10))
result = Series(m)
exp = Series(range(10))
tm.assert_series_equal(result, exp)
m = map(lambda x: x, range(10))
result = Series(m, index=range(10, 20))
exp.index = range(10, 20)
tm.assert_series_equal(result, exp)
def test_constructor_categorical(self):
cat = pd.Categorical([0, 1, 2, 0, 1, 2], ["a", "b", "c"], fastpath=True)
res = Series(cat)
tm.assert_categorical_equal(res.values, cat)
# can cast to a new dtype
result = Series(pd.Categorical([1, 2, 3]), dtype="int64")
expected = pd.Series([1, 2, 3], dtype="int64")
tm.assert_series_equal(result, expected)
# GH12574
cat = Series(pd.Categorical([1, 2, 3]), dtype="category")
assert is_categorical_dtype(cat)
assert is_categorical_dtype(cat.dtype)
s = Series([1, 2, 3], dtype="category")
assert is_categorical_dtype(s)
assert is_categorical_dtype(s.dtype)
def test_constructor_categorical_with_coercion(self):
factor = Categorical(["a", "b", "b", "a", "a", "c", "c", "c"])
# test basic creation / coercion of categoricals
s = Series(factor, name="A")
assert s.dtype == "category"
assert len(s) == len(factor)
str(s.values)
str(s)
# in a frame
df = DataFrame({"A": factor})
result = df["A"]
tm.assert_series_equal(result, s)
result = df.iloc[:, 0]
tm.assert_series_equal(result, s)
assert len(df) == len(factor)
str(df.values)
str(df)
df = DataFrame({"A": s})
result = df["A"]
tm.assert_series_equal(result, s)
assert len(df) == len(factor)
str(df.values)
str(df)
# multiples
df = DataFrame({"A": s, "B": s, "C": 1})
result1 = df["A"]
result2 = df["B"]
tm.assert_series_equal(result1, s)
tm.assert_series_equal(result2, s, check_names=False)
assert result2.name == "B"
assert len(df) == len(factor)
str(df.values)
str(df)
# GH8623
x = DataFrame(
[[1, "<NAME>"], [2, "<NAME>"], [1, "<NAME>"]],
columns=["person_id", "person_name"],
)
x["person_name"] = Categorical(x.person_name) # doing this breaks transform
expected = x.iloc[0].person_name
result = x.person_name.iloc[0]
assert result == expected
result = x.person_name[0]
assert result == expected
result = x.person_name.loc[0]
assert result == expected
def test_constructor_categorical_dtype(self):
result = pd.Series(
["a", "b"], dtype=CategoricalDtype(["a", "b", "c"], ordered=True)
)
        assert is_categorical_dtype(result.dtype)  # api: pandas.core.dtypes.common.is_categorical_dtype
from pathlib import Path
import numpy as np
import pandas as pd
from tqdm import tqdm
zheng_all_files = [el.as_posix() for el in Path('ecg_data/zheng2020').glob('**/*.*')]
zheng_all_files.sort()
assert len(zheng_all_files) == 10651
all_data = []
for el in tqdm(zheng_all_files):
if el.endswith('.csv'):
tmp_data = pd.read_csv(el)
assert np.all(tmp_data.columns == ['I', 'II', 'III', 'aVR', 'aVL', 'aVF', 'V1', 'V2', 'V3', 'V4', 'V5', 'V6'])
tmp_data = tmp_data.to_numpy()
all_data.append(tmp_data)
for el in all_data:
assert el.shape == (5000, 12)
all_data = np.asarray(all_data)
all_data = all_data.transpose(0, 2, 1)
meta_info = pd.read_excel('ecg_data/zheng2020/Diagnostics.xlsx')  # api: pandas.read_excel
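# --- Note (added): resulting array layout ---
# After the transpose above, all_data has shape (n_records, 12, 5000): one row per
# recording, leads ordered as in the CSV header, 5000 samples per lead. For example:
#     lead_ii_first_record = all_data[0, 1, :]   # lead 'II' of the first recording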
import rebound
import numpy as np
import pandas as pd
import multiprocessing
from collections import OrderedDict
from celmech.poincare import Poincare, PoincareHamiltonian
from celmech import Andoyer, AndoyerHamiltonian
from celmech.resonances import resonant_period_ratios, resonance_intersections_list, resonance_pratio_span
from celmech.transformations import masses_to_jacobi
from celmech.andoyer import get_num_fixed_points
import itertools
def collision(reb_sim, col):
reb_sim.contents._status = 5
return 0
def safe_run_func(runfunc):
def new_run_func(*args, **kwargs):
try:
return runfunc(*args, **kwargs)
except RuntimeError:
return None
return new_run_func
from scipy.optimize import brenth
def F(e,alpha,gamma):
"""Equation 35 of Laskar & Petit (2017)"""
denom = np.sqrt(alpha*(1-e*e)+gamma*gamma*e*e)
return alpha*e -1 + alpha + gamma*e / denom
### start AMD functions
def critical_relative_AMD(alpha,gamma):
"""Equation 29"""
e0 = np.min((1,1/alpha-1))
ec = brenth(F,0,e0,args=(alpha,gamma))
e1c = np.sin(np.arctan(gamma*ec / np.sqrt(alpha*(1-ec*ec))))
curlyC = gamma*np.sqrt(alpha) * (1-np.sqrt(1-ec*ec)) + (1 - np.sqrt(1-e1c*e1c))
return curlyC
@safe_run_func
def compute_AMD(sim):
pstar = sim.particles[0]
Ltot = pstar.m * np.cross(pstar.xyz,pstar.vxyz)
ps = sim.particles[1:]
Lmbda=np.zeros(len(ps))
G = np.zeros(len(ps))
Lhat = np.zeros((len(ps),3))
for k,p in enumerate(sim.particles[1:]):
orb = p.calculate_orbit(primary=pstar)
Lmbda[k] = p.m * np.sqrt(p.a)
G[k] = Lmbda[k] * np.sqrt(1-p.e*p.e)
hvec = np.cross(p.xyz,p.vxyz)
Lhat[k] = hvec / np.linalg.norm(hvec)
Ltot = Ltot + p.m * hvec
cosi = np.array([Lh.dot(Ltot) for Lh in Lhat]) / np.linalg.norm(Ltot)
return np.sum(Lmbda) - np.sum(G * cosi)
@safe_run_func
def AMD_stable_Q(sim):
AMD = compute_AMD(sim)
pstar = sim.particles[0]
ps = sim.particles[1:]
for i in range(len(ps)-1):
pIn = ps[i]
pOut = ps[i+1]
orbIn = pIn.calculate_orbit(pstar)
orbOut = pOut.calculate_orbit(pstar)
alpha = orbIn.a / orbOut.a
gamma = pIn.m / pOut.m
LmbdaOut = pOut.m * np.sqrt(orbOut.a)
Ccrit = critical_relative_AMD(alpha,gamma)
C = AMD / LmbdaOut
if C>Ccrit:
return False
return True
@safe_run_func
def AMD_stability_coefficients(sim):
AMD = compute_AMD(sim)
pstar = sim.particles[0]
ps = sim.particles[1:]
coeffs = np.zeros(len(ps)-1)
for i in range(len(ps)-1):
pIn = ps[i]
pOut = ps[i+1]
orbIn = pIn.calculate_orbit(pstar)
orbOut = pOut.calculate_orbit(pstar)
alpha = orbIn.a / orbOut.a
gamma = pIn.m / pOut.m
LmbdaOut = pOut.m * np.sqrt(orbOut.a)
Ccrit = critical_relative_AMD(alpha,gamma)
C = AMD / LmbdaOut
coeffs[i] = C / Ccrit
return coeffs
def AMD_stability_coefficient(sim, i1, i2):
AMD = compute_AMD(sim)
ps = sim.particles
pstar = ps[0]
pIn = ps[i1]
pOut = ps[i2]
orbIn = pIn.calculate_orbit(pstar)
orbOut = pOut.calculate_orbit(pstar)
alpha = orbIn.a / orbOut.a
gamma = pIn.m / pOut.m
LmbdaOut = pOut.m * np.sqrt(orbOut.a)
Ccrit = critical_relative_AMD(alpha,gamma)
C = AMD / LmbdaOut
return C / Ccrit
### end AMD functions
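# --- Usage sketch (added): exercising the AMD helpers with a toy REBOUND system ---
# Masses and orbital elements below are arbitrary illustrative values.
def _amd_demo():
    sim = rebound.Simulation()
    sim.add(m=1.)                                  # central star
    sim.add(m=1.e-5, a=1.00, e=0.03, pomega=0.5)
    sim.add(m=1.e-5, a=1.25, e=0.02, pomega=2.1)
    sim.add(m=1.e-5, a=1.60, e=0.04, pomega=4.0)
    sim.move_to_com()
    print("AMD               :", compute_AMD(sim))
    print("AMD-stable?       :", AMD_stable_Q(sim))
    print("stability coeffs  :", AMD_stability_coefficients(sim))  # >1 => pair is AMD-unstable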
# write functions to take args and unpack them at top so it's clear what you have to pass in args
@safe_run_func
def orbtseries(sim, args, trio):
Norbits = args[0]
Nout = args[1]
val = np.zeros((Nout, 19))
###############################
sim.collision_resolve = collision
sim.ri_whfast.keep_unsynchronized = 1
###############################
# Chunk above should be the same in all runfuncs we write in order to match simarchives
# Fill in values below
times = np.linspace(0, Norbits*sim.particles[1].P, Nout) # TTV systems don't have ps[1].P=1, so must multiply!
P0 = sim.particles[1].P
a0 = sim.particles[1].a
for i, time in enumerate(times):
try:
sim.integrate(time, exact_finish_time=0)
except:
break
orbits = sim.calculate_orbits()
skipped = 0
for j, o in enumerate(orbits):
#print(j, trio)
if j+1 not in trio:
skipped += 1
continue
#print(j, 'actually in', trio, skipped)
val[i,0] = sim.t/P0
val[i,6*(j-skipped)+1] = o.a/a0
val[i,6*(j-skipped)+2] = o.e
val[i,6*(j-skipped)+3] = o.inc
val[i,6*(j-skipped)+4] = o.Omega
val[i,6*(j-skipped)+5] = o.pomega
val[i,6*(j-skipped)+6] = o.M
return val
@safe_run_func
def orbsummaryfeaturesxgb(sim, args):
Norbits = args[0]
Nout = args[1]
window = args[2]
###############################
sim.collision_resolve = collision
sim.ri_whfast.keep_unsynchronized = 1
##############################
times = np.linspace(0, Norbits*sim.particles[1].P, Nout) # TTV systems don't have ps[1].P=1, so must multiply!
ps = sim.particles
P0 = ps[1].P
Nout = len(times)
features = OrderedDict()
AMDcoeffs = AMD_stability_coefficients(sim)
features["C_AMD12"] = AMDcoeffs[0]
features["C_AMD23"] = AMDcoeffs[1]
features["C_AMD_max"] = np.max(AMDcoeffs)
a = np.zeros((sim.N,Nout))
e = np.zeros((sim.N,Nout))
inc = np.zeros((sim.N,Nout))
beta12 = np.zeros(Nout)
beta23 = np.zeros(Nout)
Rhill12 = ps[1].a*((ps[1].m+ps[2].m)/3.)**(1./3.)
Rhill23 = ps[2].a*((ps[2].m+ps[3].m)/3.)**(1./3.)
eHill = [0, Rhill12/ps[1].a, max(Rhill12, Rhill23)/ps[2].a, Rhill23/ps[3].a]
daOvera = [0, (ps[2].a-ps[1].a)/ps[1].a, min(ps[3].a-ps[2].a, ps[2].a-ps[1].a)/ps[2].a, (ps[3].a-ps[2].a)/ps[3].a]
for i, t in enumerate(times):
for j in [1,2,3]:
a[j,i] = ps[j].a
e[j,i] = ps[j].e
inc[j,i] = ps[j].inc
# mutual hill radii since that's what goes into Hill stability
Rhill12 = ps[1].a*((ps[1].m+ps[2].m)/3.)**(1./3.)
Rhill23 = ps[2].a*((ps[2].m+ps[3].m)/3.)**(1./3.)
beta12[i] = (ps[2].a - ps[1].a)/Rhill12
beta23[i] = (ps[3].a - ps[2].a)/Rhill23
try:
sim.integrate(t, exact_finish_time=0)
except:
break
features['t_final_short'] = sim.t/P0
for string, feature in [("beta12", beta12), ("beta23", beta23)]:
mean = feature.mean()
std = feature.std()
features["avg_"+string] = mean
features["std_"+string] = std
features["min_"+string] = min(feature)
features["max_"+string] = max(feature)
for j in [1,2,3]:
for string, feature in [('a', a), ('e', e), ('inc', inc)]:
mean = feature[j].mean()
std = feature[j].std()
features['avg_'+string+str(j)] = mean
features['std_'+string+str(j)] = std
features['max_'+string+str(j)] = feature[j].max()
features['min_'+string+str(j)] = feature[j].min()
features['norm_std_'+string+str(j)] = std/mean
features['norm_max_'+string+str(j)] = np.abs(feature[j] - mean).max()/mean
sample = feature[j][:window]
samplemean = sample.mean()
features['norm_std_window'+str(window)+'_'+string+str(j)] = sample.std()/samplemean
features['norm_max_window'+str(window)+'_'+string+str(j)] = np.abs(sample - samplemean).max()/samplemean
for string, feature in [('eH', e), ('iH', inc)]:
mean = feature[j].mean()
std = feature[j].std()
features['avg_'+string+str(j)] = mean/eHill[j]
features['std_'+string+str(j)] = std/eHill[j]
features['max_'+string+str(j)] = feature[j].max()/eHill[j]
features['min_'+string+str(j)] = feature[j].min()/eHill[j]
string, feature = ('ecross', e)
features['avg_'+string+str(j)] = mean/daOvera[j]
features['std_'+string+str(j)] = std/daOvera[j]
features['max_'+string+str(j)] = feature[j].max()/daOvera[j]
features['min_'+string+str(j)] = feature[j].min()/daOvera[j]
xx = range(a[j].shape[0])
yy = a[j]/a[j].mean()/features["t_final_short"]
par = np.polyfit(xx, yy, 1, full=True)
features['norm_a'+str(j)+'_slope'] = par[0][0]
return pd.Series(features, index=list(features.keys()))
def findres(sim, i1, i2):
delta = 0.03
maxorder = 2
ps = Poincare.from_Simulation(sim=sim).particles # get averaged mean motions
n1 = ps[i1].n
n2 = ps[i2].n
m1 = ps[i1].m
m2 = ps[i2].m
Pratio = n2/n1
if np.isnan(Pratio): # probably due to close encounter where averaging step doesn't converge
return np.nan, np.nan, np.nan
res = resonant_period_ratios(Pratio-delta,Pratio+delta, order=maxorder)
Z = np.sqrt((ps[i1].e*np.cos(ps[i1].pomega) - ps[i2].e*np.cos(ps[i2].pomega))**2 + (ps[i1].e*np.sin(ps[i1].pomega) - ps[i2].e*np.sin(ps[i2].pomega))**2)
maxstrength = 0
j, k, i1, i2, strength = -1, -1, -1, -1, -1
for a, b in res:
s = np.abs(np.sqrt(m1+m2)*Z**((b-a)/2.)/(b*n2 - a*n1))
#print('{0}:{1}'.format(b, a), (b*n2 - a*n1), s)
if s > maxstrength:
j = b
k = b-a
i1 = 1
i2 = 2
strength=s
maxstrength = s
return j, k, strength
def findres2(sim, i1, i2):
maxorder = 2
ps = Poincare.from_Simulation(sim=sim).particles # get averaged mean motions
n1 = ps[i1].n
n2 = ps[i2].n
m1 = ps[i1].m/ps[i1].M
m2 = ps[i2].m/ps[i2].M
Pratio = n2/n1
if np.isnan(Pratio): # probably due to close encounter where averaging step doesn't converge
return np.nan, np.nan, np.nan
delta = 0.03
minperiodratio = max(Pratio-delta, 0.)
maxperiodratio = min(Pratio+delta, 0.999) # too many resonances close to 1
res = resonant_period_ratios(minperiodratio,maxperiodratio, order=2)
Z = np.sqrt((ps[i1].e*np.cos(ps[i1].pomega) - ps[i2].e*np.cos(ps[i2].pomega))**2 + (ps[i1].e*np.sin(ps[i1].pomega) - ps[i2].e*np.sin(ps[i2].pomega))**2)
Zcross = (ps[i2].a-ps[i1].a)/ps[i1].a
j, k, i1, i2, maxstrength = -1, -1, -1, -1, -1
for a, b in res:
s = np.abs(np.sqrt(m1+m2)*(Z/Zcross)**((b-a)/2.)/((b*n2 - a*n1)/n1))
#print('{0}:{1}'.format(b, a), (b*n2 - a*n1), s)
if s > maxstrength:
j = b
k = b-a
maxstrength = s
if maxstrength > -1:
return j, k, maxstrength
else:
return np.nan, np.nan, np.nan
def findresv3(sim, i1, i2):
maxorder = 2
try:
ps = Poincare.from_Simulation(sim=sim, average=False).particles # get averaged mean motions
except:
return np.nan, np.nan, np.nan
n1 = ps[i1].n
n2 = ps[i2].n
m1 = ps[i1].m/ps[i1].M
m2 = ps[i2].m/ps[i2].M
Pratio = n2/n1
if np.isnan(Pratio): # probably due to close encounter where averaging step doesn't converge
return np.nan, np.nan, np.nan
delta = 0.03
minperiodratio = max(Pratio-delta, 0.)
maxperiodratio = min(Pratio+delta, 0.999) # too many resonances close to 1
res = resonant_period_ratios(minperiodratio,maxperiodratio, order=2)
Z = np.sqrt((ps[i1].e*np.cos(ps[i1].pomega) - ps[i2].e*np.cos(ps[i2].pomega))**2 + (ps[i1].e*np.sin(ps[i1].pomega) - ps[i2].e*np.sin(ps[i2].pomega))**2)
Zcross = (ps[i2].a-ps[i1].a)/ps[i1].a
j, k, i1, i2, maxstrength = -1, -1, -1, -1, -1
for a, b in res:
s = np.abs(np.sqrt(m1+m2)*(Z/Zcross)**((b-a)/2.)/((b*n2 - a*n1)/n1))
#print('{0}:{1}'.format(b, a), (b*n2 - a*n1), s)
if s > maxstrength:
j = b
k = b-a
maxstrength = s
return j, k, maxstrength
@safe_run_func
def normressummaryfeaturesxgb(sim, args):
ps = sim.particles
Mstar = ps[0].m
P1 = ps[1].P
sim2 = rebound.Simulation()
sim2.G = 4*np.pi**2
sim2.add(m=1.)
for p in ps[1:]:
sim2.add(m=p.m/Mstar, P=p.P/P1, e=p.e, inc=p.inc, pomega=p.pomega, Omega=p.Omega, theta=p.theta)
sim2.move_to_com()
sim2.integrator="whfast"
sim2.dt=sim2.particles[1].P*2.*np.sqrt(3)/100.
return ressummaryfeaturesxgb(sim2, args)
@safe_run_func
def ressummaryfeaturesxgb(sim, args):
Norbits = args[0]
Nout = args[1]
###############################
sim.collision_resolve = collision
sim.ri_whfast.keep_unsynchronized = 1
sim.ri_whfast.safe_mode = 0
##############################
features = OrderedDict()
try:
AMDcoeffs = AMD_stability_coefficients(sim)
features["C_AMD12"] = AMDcoeffs[0]
features["C_AMD23"] = AMDcoeffs[1]
features["C_AMD_max"] = np.max(AMDcoeffs)
except:
features["C_AMD12"] = np.nan
features["C_AMD23"] = np.nan
features["C_AMD_max"] = np.nan
ps = sim.particles
sim.init_megno(seed=0)
N = sim.N - sim.N_var
a0 = [0] + [sim.particles[i].a for i in range(1, N)]
Npairs = int((N-1)*(N-2)/2)
js, ks, strengths = np.zeros(Npairs), np.zeros(Npairs), np.zeros(Npairs)
maxj, maxk, maxi1, maxi2, maxpairindex, maxstrength = -1, -1, -1, -1, -1, -1
Zcross = np.zeros(Npairs)
#print('pairindex, i1, i2, j, k, strength')
for i, [i1, i2] in enumerate(itertools.combinations(np.arange(1, N), 2)):
js[i], ks[i], strengths[i] = findresv3(sim, i1, i2)
Zcross[i] = (ps[int(i2)].a-ps[int(i1)].a)/ps[int(i1)].a
#print(i, i1, i2, js[i], ks[i], strengths[i])
if strengths[i] > maxstrength:
maxj, maxk, maxi1, maxi2, maxpairindex, maxstrength = js[i], ks[i], i1, i2, i, strengths[i]
features['Zcross12'] = Zcross[0]
features['Zcross13'] = Zcross[1]
features['Zcross23'] = Zcross[2]
features['maxj'] = maxj
features['maxk'] = maxk
features['maxi1'] = maxi1
features['maxi2'] = maxi2
features['maxstrength'] = maxstrength
sortedstrengths = strengths.copy()
sortedstrengths.sort() # ascending
    if sortedstrengths[-1] > 0 and sortedstrengths[-2] > 0:  # if the two strongest resonances are nonzero
features['secondres'] = sortedstrengths[-2]/sortedstrengths[-1] # ratio of strengths
else:
features['secondres'] = -1
#print('max', maxi1, maxi2, maxj, maxk, maxpairindex, maxstrength)
#print('df (j, k, pairindex):', features['j'], features['k'], features['pairindex'])
times = np.linspace(0, Norbits*sim.particles[1].P, Nout)
eminus = np.zeros((Npairs, Nout))
rebound_Z, rebound_phi = np.zeros((Npairs,Nout)), np.zeros((Npairs,Nout))
rebound_Zcom, rebound_phiZcom = np.zeros((Npairs,Nout)), np.zeros((Npairs,Nout))
rebound_Zstar, rebound_dKprime = np.zeros((Npairs,Nout)), np.zeros((Npairs,Nout))
celmech_Z, celmech_phi = np.zeros(Nout), np.zeros(Nout)
celmech_Zcom, celmech_phiZcom = np.zeros(Nout), np.zeros(Nout)
celmech_Zstar, celmech_dKprime = np.zeros(Nout), np.zeros(Nout)
for i,t in enumerate(times):
for j, [i1, i2] in enumerate(itertools.combinations(np.arange(1, N), 2)):
i1, i2 = int(i1), int(i2)
eminus[j, i] = np.sqrt((ps[i2].e*np.cos(ps[i2].pomega)-ps[i1].e*np.cos(ps[i1].pomega))**2 + (ps[i2].e*np.sin(ps[i2].pomega)-ps[i1].e*np.sin(ps[i1].pomega))**2)
if js[j] != -1:
pvars = Poincare.from_Simulation(sim, average=False)
avars = Andoyer.from_Poincare(pvars, j=int(js[j]), k=int(ks[j]), a10=a0[i1], i1=i1, i2=i2)
rebound_Z[j, i] = avars.Z
rebound_phi[j, i] = avars.phi
rebound_Zcom[j, i] = avars.Zcom
rebound_phiZcom[j, i] = avars.phiZcom
rebound_Zstar[j, i] = avars.Zstar
rebound_dKprime[j, i] = avars.dKprime
try:
sim.integrate(t, exact_finish_time=0)
except:
break
mask = eminus[0] > 0 # where there are data points in case sim ends early
times = times[mask]
eminus = eminus[:, mask]
rebound_Z, rebound_phi = rebound_Z[:, mask], rebound_phi[:, mask]
rebound_Zcom, rebound_phiZcom = rebound_Zcom[:, mask], rebound_phiZcom[:, mask]
rebound_Zstar, rebound_dKprime = rebound_Zstar[:, mask], rebound_dKprime[:, mask]
celmech_Z, celmech_phi, celmech_Zcom, celmech_phiZcom = celmech_Z[mask], celmech_phi[mask], celmech_Zcom[mask], celmech_phiZcom[mask]
celmech_Zstar, celmech_dKprime = celmech_Zstar[mask], celmech_dKprime[mask]
for i, s in zip([0,2], ['12', '23']): # take adjacent ones
EM = eminus[i]
Zc = Zcross[i]
features['EMmed'+s] = np.median(EM)/Zc
features['EMmax'+s] = EM.max()/Zc
try:
p = np.poly1d(np.polyfit(times, EM, 3))
m = p(times)
EMdrift = np.abs((m[-1]-m[0])/m[0])
features['EMdrift'+s] = EMdrift
except:
features['EMdrift'+s] = np.nan
maxindex = (m == m.max()).nonzero()[0][0] # index where cubic polynomial fit to EM reaches max to track long wavelength variations (secular?)
if EMdrift > 0.1 and (maxindex < 0.01*Nout or maxindex > 0.99*Nout): # don't flag as not capturing secular if Z isn't varying significantly in first place
features['capseculartscale'+s] = 0
else:
features['capseculartscale'+s] = 1
features['EMdetrendedstd'+s] = pd.Series(EM-m).std()/EM[0]
rollstd = pd.Series(EM).rolling(window=100).std()
features['EMrollingstd'+s] = rollstd[100:].median()/EM[0]
var = [EM[:j].var() for j in range(len(EM))]
try:
p = np.poly1d(np.polyfit(times[len(var)//2:], var[len(var)//2:], 1)) # fit only second half to get rid of transient
features['DiffcoeffEM'+s] = p[1]/Zc**2
except:
features['DiffcoeffEM'+s] = np.nan
features['medvarEM'+s] = np.median(var[len(var)//2:])/Zc**2
if strengths[i] != -1:
Z = rebound_Z[i]
features['Zmed'+s] = np.median(Z)/Zc
features['Zmax'+s] = rebound_Z[i].max()/Zc
try:
p = np.poly1d(np.polyfit(times, Z, 3))
m = p(times)
features['Zdetrendedstd'+s] = pd.Series(Z-m).std()/Z[0]
except:
features['Zdetrendedstd'+s] = np.nan
rollstd = pd.Series(Z).rolling(window=100).std()
features['Zrollingstd'+s] = rollstd[100:].median()/Z[0]
var = [Z[:j].var() for j in range(len(Z))]
try:
p = np.poly1d(np.polyfit(times[len(var)//2:], var[len(var)//2:], 1)) # fit only second half to get rid of transient
features['DiffcoeffZ'+s] = p[1]/Zc**2
except:
features['DiffcoeffZ'+s] = np.nan
features['medvarZ'+s] = np.median(var[len(var)//2:])/Zc**2
features['Zcomdrift'+s] = np.max(np.abs(rebound_Zcom[i]-rebound_Zcom[i, 0])/rebound_Zcom[i, 0])
rollstd = pd.Series(rebound_Zcom[i]).rolling(window=100).std()
features['Zcomrollingstd'+s] = rollstd[100:].median()/rebound_Zcom[i,0]
features['phiZcomdrift'+s] = np.max(np.abs(rebound_phiZcom[i]-rebound_phiZcom[i, 0]))
rollstd = pd.Series(rebound_phiZcom[i]).rolling(window=100).std()
features['phiZcomrollingstd'+s] = rollstd[100:].median()
features['Zstardrift'+s] = np.max(np.abs(rebound_Zstar[i]-rebound_Zstar[i, 0])/rebound_Zstar[i, 0])
rollstd = pd.Series(rebound_Zstar[i]).rolling(window=100).std()
features['Zstarrollingstd'+s] = rollstd[100:].median()/rebound_Zstar[i,0]
Zcosphi = Z*np.cos(rebound_phi[i])
features['Zcosphistd'+s] = Zcosphi.std()/Zc
features['medZcosphi'+s] = np.median(Zcosphi)/Zc
else:
features['Zmed'+s] = -1
features['Zmax'+s] = -1
features['Zdetrendedstd'+s] = -1
features['Zrollingstd'+s] = -1
features['DiffcoeffZ'+s] = -1
features['medvarZ'+s] = -1
features['Zcomdrift'+s] = -1
features['Zcomrollingstd'+s] = -1
features['phiZcomdrift'+s] = -1
features['phiZcomrollingstd'+s] = -1
features['Zstardrift'+s] = -1
features['Zstarrollingstd'+s] = -1
features['Zcosphistd'+s] = -1
features['medZcosphi'+s] = -1
tlyap = 1./np.abs(sim.calculate_lyapunov())
if tlyap > Norbits:
tlyap = Norbits
features['tlyap'] = tlyap
features['megno'] = sim.calculate_megno()
return pd.Series(features, index=list(features.keys()))
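# --- Illustration (added): the drift / rolling-variability pattern used above ---
# Most of the resonance features reduce to two measurements on a time series:
# the relative drift of a cubic trend fit, and the median of a rolling standard
# deviation. A standalone helper showing the idiom (window size mirrors the code above):
def _drift_and_rollstd(times, series, window=100):
    trend = np.poly1d(np.polyfit(times, series, 3))(times)
    drift = np.abs((trend[-1] - trend[0]) / trend[0])
    rollstd = pd.Series(series).rolling(window=window).std()[window:].median()
    return drift, rollstd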
@safe_run_func
def ressummaryfeaturesxgb2(sim, args):
Norbits = args[0]
Nout = args[1]
###############################
sim.collision_resolve = collision
sim.ri_whfast.keep_unsynchronized = 1
sim.ri_whfast.safe_mode = 0
##############################
features = OrderedDict()
AMDcoeffs = AMD_stability_coefficients(sim)
features["C_AMD12"] = AMDcoeffs[0]
features["C_AMD23"] = AMDcoeffs[1]
features["C_AMD_max"] = np.max(AMDcoeffs)
ps = sim.particles
sim.init_megno()
N = sim.N - sim.N_var
a0 = [0] + [sim.particles[i].a for i in range(1, N)]
Npairs = int((N-1)*(N-2)/2)
    js, ks, strengths = np.zeros(Npairs, dtype=int), np.zeros(Npairs, dtype=int), np.zeros(Npairs)
maxj, maxk, maxi1, maxi2, maxpairindex, maxstrength = -1, -1, -1, -1, -1, -1
Zcross = np.zeros(Npairs)
#print('pairindex, i1, i2, j, k, strength')
for i, [i1, i2] in enumerate(itertools.combinations(np.arange(1, N), 2)):
js[i], ks[i], strengths[i] = findresv3(sim, i1, i2)
Zcross[i] = (ps[int(i2)].a-ps[int(i1)].a)/ps[int(i1)].a
#print(i, i1, i2, js[i], ks[i], strengths[i])
if strengths[i] > maxstrength:
maxj, maxk, maxi1, maxi2, maxpairindex, maxstrength = js[i], ks[i], i1, i2, i, strengths[i]
features['Zcross12'] = Zcross[0]
features['Zcross13'] = Zcross[1]
features['Zcross23'] = Zcross[2]
features['maxj'] = maxj
features['maxk'] = maxk
features['maxi1'] = maxi1
features['maxi2'] = maxi2
features['maxstrength'] = maxstrength
sortedstrengths = strengths.copy()
sortedstrengths.sort()
if sortedstrengths[-1] > 0 and sortedstrengths[-2] > 0:
features['secondres'] = sortedstrengths[-2]/sortedstrengths[-1]
else:
features['secondres'] = -1
#print('max', maxi1, maxi2, maxj, maxk, maxpairindex, maxstrength)
#print('df (j, k, pairindex):', features['j'], features['k'], features['pairindex'])
P0 = sim.particles[1].P
times = np.linspace(0, Norbits, Nout)
eminus = np.zeros((Npairs, Nout))
rebound_Z, rebound_phi = np.zeros((Npairs,Nout)), np.zeros((Npairs,Nout))
rebound_Zcom, rebound_phiZcom = np.zeros((Npairs,Nout)), np.zeros((Npairs,Nout))
rebound_Zstar, rebound_dKprime = np.zeros((Npairs,Nout)), np.zeros((Npairs,Nout))
celmech_Z, celmech_phi = np.zeros(Nout), np.zeros(Nout)
celmech_Zcom, celmech_phiZcom = np.zeros(Nout), np.zeros(Nout)
celmech_Zstar, celmech_dKprime = np.zeros(Nout), np.zeros(Nout)
for i,t in enumerate(times):
for j, [i1, i2] in enumerate(itertools.combinations(np.arange(1, N), 2)):
i1, i2 = int(i1), int(i2)
eminus[j, i] = np.sqrt((ps[i2].e*np.cos(ps[i2].pomega)-ps[i1].e*np.cos(ps[i1].pomega))**2 + (ps[i2].e*np.sin(ps[i2].pomega)-ps[i1].e*np.sin(ps[i1].pomega))**2)
if js[j] != -1:
pvars = Poincare.from_Simulation(sim)
avars = Andoyer.from_Poincare(pvars, j=js[j], k=ks[j], a10=a0[i1], i1=i1, i2=i2)
rebound_Z[j, i] = avars.Z
rebound_phi[j, i] = avars.phi
rebound_Zcom[j, i] = avars.Zcom
rebound_phiZcom[j, i] = avars.phiZcom
rebound_Zstar[j, i] = avars.Zstar
rebound_dKprime[j, i] = avars.dKprime
try:
sim.integrate(t*P0, exact_finish_time=0)
except:
break
mask = eminus[0] > 0 # where there are data points in case sim ends early
times = times[mask]
eminus = eminus[:, mask]
rebound_Z, rebound_phi = rebound_Z[:, mask], rebound_phi[:, mask]
rebound_Zcom, rebound_phiZcom = rebound_Zcom[:, mask], rebound_phiZcom[:, mask]
rebound_Zstar, rebound_dKprime = rebound_Zstar[:, mask], rebound_dKprime[:, mask]
celmech_Z, celmech_phi, celmech_Zcom, celmech_phiZcom = celmech_Z[mask], celmech_phi[mask], celmech_Zcom[mask], celmech_phiZcom[mask]
celmech_Zstar, celmech_dKprime = celmech_Zstar[mask], celmech_dKprime[mask]
for i, s in zip([0,2], ['12', '23']): # take adjacent ones
EM = eminus[i]
Zc = Zcross[i]
features['EMmed'+s] = np.median(EM)/Zc
features['EMmax'+s] = EM.max()/Zc
try:
p = np.poly1d(np.polyfit(times, EM, 3))
m = p(times)
EMdrift = np.abs((m[-1]-m[0])/m[0])
features['EMdrift'+s] = EMdrift
except:
features['EMdrift'+s] = np.nan
maxindex = (m == m.max()).nonzero()[0][0] # index where cubic polynomial fit to EM reaches max to track long wavelength variations (secular?)
if EMdrift > 0.1 and (maxindex < 0.01*Nout or maxindex > 0.99*Nout): # don't flag as not capturing secular if Z isn't varying significantly in first place
features['capseculartscale'+s] = 0
else:
features['capseculartscale'+s] = 1
features['EMdetrendedstd'+s] = pd.Series(EM-m).std()/EM[0]
rollstd = pd.Series(EM).rolling(window=100).std()
features['EMrollingstd'+s] = rollstd[100:].median()/EM[0]
var = [EM[:j].var() for j in range(len(EM))]
try:
p = np.poly1d(np.polyfit(times[len(var)//2:], var[len(var)//2:], 1)) # fit only second half to get rid of transient
features['DiffcoeffEM'+s] = p[1]/Zc**2
except:
features['DiffcoeffEM'+s] = np.nan
features['medvarEM'+s] = np.median(var[len(var)//2:])/Zc**2
if strengths[i] != -1:
Z = rebound_Z[i]
features['Zmed'+s] = np.median(Z)/Zc
features['Zmax'+s] = rebound_Z[i].max()/Zc
try:
p = np.poly1d(np.polyfit(times, Z, 3))
m = p(times)
features['Zdetrendedstd'+s] = pd.Series(Z-m).std()/Z[0]
except:
features['Zdetrendedstd'+s] = np.nan
rollstd = pd.Series(Z).rolling(window=100).std()
features['Zrollingstd'+s] = rollstd[100:].median()/Z[0]
var = [Z[:j].var() for j in range(len(Z))]
try:
p = np.poly1d(np.polyfit(times[len(var)//2:], var[len(var)//2:], 1)) # fit only second half to get rid of transient
features['DiffcoeffZ'+s] = p[1]/Zc**2
except:
features['DiffcoeffZ'+s] = np.nan
features['medvarZ'+s] = np.median(var[len(var)//2:])/Zc**2
features['Zcomdrift'+s] = np.max(np.abs(rebound_Zcom[i]-rebound_Zcom[i, 0])/rebound_Zcom[i, 0])
rollstd = pd.Series(rebound_Zcom[i])
#!/usr/bin/env python
# encoding: utf-8
#
# Copyright SAS Institute
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
'''
DataFrame that includes SAS metadata (formats, labels, titles)
'''
from __future__ import print_function, division, absolute_import, unicode_literals
import collections
import datetime
import json
import re
import pandas as pd
import six
from .cas.table import CASTable
from .utils.compat import (a2u, a2n, int32, int64, float64, int32_types,
int64_types, float64_types, bool_types, text_types,
binary_types)
from .utils import dict2kwargs
from .clib import errorcheck
from .formatter import SASFormatter
def dtype_from_var(value):
''' Guess the CAS data type from the value '''
if isinstance(value, int64_types):
return 'int64'
if isinstance(value, int32_types):
return 'int32'
if isinstance(value, float64_types):
return 'double'
if isinstance(value, text_types):
return 'varchar'
if isinstance(value, binary_types):
return 'varbinary'
if isinstance(value, datetime.datetime):
return 'datetime'
if isinstance(value, datetime.date):
return 'date'
if isinstance(value, datetime.time):
return 'time'
raise TypeError('Unrecognized type for value: %s' % value)
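# Rough illustration of dtype_from_var (hedged sketch; the exact mapping of Python
# builtins depends on the type tuples imported from utils.compat above):
#
#   >>> dtype_from_var(3.14)
#   'double'
#   >>> dtype_from_var(u'abc')
#   'varchar'
#   >>> dtype_from_var(datetime.date(2020, 1, 1))
#   'date'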
def split_format(fmt):
''' Split a SAS format name into components '''
if not fmt:
sasfmt = collections.namedtuple('SASFormat', ['ischar', 'name', 'width', 'ndec'])
return sasfmt(False, '', 0, 0)
parts = list(re.match(r'(\$)?(\w*?)(\d*)\.(\d*)', fmt).groups())
parts[0] = parts[0] and True or False
parts[2] = parts[2] and int(parts[2]) or 0
parts[3] = parts[3] and int(parts[3]) or 0
sasfmt = collections.namedtuple('SASFormat', ['ischar', 'name', 'width', 'ndec'])
return sasfmt(*parts)
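# Illustrative behaviour of split_format, following the regex above (hedged sketch):
#
#   >>> split_format('DOLLAR12.2')
#   SASFormat(ischar=False, name='DOLLAR', width=12, ndec=2)
#   >>> split_format('$CHAR8.')
#   SASFormat(ischar=True, name='CHAR', width=8, ndec=0)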
def concat(objs, **kwargs):
'''
Concatenate :class:`SASDataFrames` while preserving table and column metadata
This function is equivalent to :func:`pandas.concat` except that it also
preserves metadata in :class:`SASDataFrames`. It can be used on standard
:class:`pandas.DataFrames` as well.
Parameters
----------
objs : a sequence or mapping of Series, (SAS)DataFrame, or Panel objects
The DataFrames to concatenate.
**kwargs : any, optional
Additional arguments to pass to :func:`pandas.concat`.
Examples
--------
>>> conn = swat.CAS()
>>> tbl = conn.read_csv('data/cars.csv')
>>> out = tbl.groupby('Origin').summary()
>>> print(concat([out['ByGroup1.Summary'], out['ByGroup2.Summary'],
... out['ByGroup3.Summary']]))
Returns
-------
:class:`SASDataFrame`
'''
proto = objs[0]
if not isinstance(proto, SASDataFrame):
return pd.concat(objs, **kwargs)
import warnings  # suppress warnings
warnings.filterwarnings('ignore')
import pandas as pd  # tables and data manipulation
import numpy as np
import matplotlib.pyplot as plt
from statsmodels.tsa.arima_model import ARMA
from statsmodels.tsa.arima_model import ARIMA
from statsmodels.api import tsa
import statsmodels.api as sm
from itertools import product
dta0=pd.read_csv('data.CSV',header=0)
dta0.index = pd.to_datetime(dta0['date'])
import pandas as pd
import numpy as np
from _data_utils import stationary_df, make_stationary
from _data_utils import df_slicing, fill_bs_date, future_rolling, make_float
from _data_utils import scaler_with_nan
from _data_jodi import jodi_read
from ml_data_preprocessing.make_data import make_data
from oil_forecastor.model_selection._utility import rolling_train_test_split, denoising_func
import copy
from sklearn.metrics import r2_score
import math
from scipy.special import gamma
def read_data_url(url, sheet_name_list, col_name_list, freq='D'):
data = pd.read_excel(url, sheet_name=sheet_name_list)
df_list_ = []
for sheet_name, col_name in zip(sheet_name_list, col_name_list):
df_ = data[sheet_name]
df_.columns = column_fill_name(df_.columns, col_name)
df_ = df_[pd.to_numeric(df_[col_name[1]], errors='coerce').notnull()]  # assumed filter: keep rows that parse as numeric
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 10 16:09:31 2022
@author: kkrao
"""
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.colors
import seaborn as sns
import init
import cartopy.crs as ccrs
import cartopy.feature as cf
from cartopy.feature import ShapelyFeature
import cartopy.io.shapereader as shpreader
import geopandas as gpd
import rasterio
import rasterio.mask
import fiona
import sklearn.metrics
import statsmodels.stats.contingency_tables
import matplotlib.ticker as ticker
sns.set(font_scale = 1., style = "ticks")
plt.style.use("pnas")
WIDTH = 3
def plot_lightnings():
df = pd.DataFrame()
for cat in ['grass','shrub','forest']:
sdf = pd.read_csv(os.path.join(init.dir_root,\
"data","r","matches",\
f"matched_22_apr_2022_extreme_{cat.lower()}.csv"))
df = df.append(sdf, ignore_index = True)
df = df.rename(columns = {"y":"fire"})
fname = r"D:\Krishna\projects\wildfire_from_lfmc\data\CA_State_TIGER2016\CA_State_TIGER2016.shp"
shdf = gpd.read_file(fname)
shdf = shdf.to_crs("EPSG:4326")
res = "high"
ax = plt.figure(figsize=(16,10)).gca(projection=ccrs.PlateCarree())
projection = ccrs.PlateCarree(central_longitude=0)
ax.add_geometries(shdf.geometry,
projection,
facecolor="None",
edgecolor='k',
linewidth = 2)
ax.set_extent([-125,-114,32,43])
ax.scatter(df.loc[df.fire==0,'longitude'], df.loc[df.fire==0,'latitude'], s = 20, color = "blueviolet", \
alpha = 1, edgecolor = "grey")
ax.scatter(df.loc[df.fire==1,'longitude'], df.loc[df.fire==1,'latitude'], s = 20, color = "yellow", \
alpha = 1, edgecolor = "grey")
raster = r"D:\Krishna\projects\grid_fire\data\nlcd\nlcd_2016_4km.tif"
src = rasterio.open(raster,'r')
left, bottom, right, top = src.bounds
raster = rasterio.mask.mask(src, shdf.geometry, crop=False)[0][0]
np.unique(raster)
raster = np.ma.masked_where(\
~np.isin(raster, list(init.thresh["extreme"].keys())), \
raster)
cmap, norm = matplotlib.colors.from_levels_and_colors([0,50,55,100],['darkgreen','darkgoldenrod','lime'])
ax.imshow(raster,
cmap=cmap, norm = norm,extent=(left, right, top, bottom), alpha = 0.4
)
ax = plt.figure(figsize=(16,10)).gca(projection=ccrs.PlateCarree())
projection = ccrs.PlateCarree(central_longitude=0)
ax.add_geometries(shdf.geometry,
projection,
facecolor="None",
edgecolor='k',
linewidth = 2)
ax.set_extent([-125,-114,32,43])
ax.scatter(df.loc[:,'longitude'], df.loc[:,'latitude'], s = 20, c = df["z_extreme"],cmap = matplotlib.colors.ListedColormap(["aqua","peru"]), \
alpha = 1, edgecolor = "grey")
# ax.scatter(df.loc[df.z_extreme==1,'longitude'], df.loc[df.z_extreme==1,'latitude'], s = 20, color = "peru", \
# alpha = 1, edgecolor = "grey")
raster = r"D:\Krishna\projects\grid_fire\data\nlcd\nlcd_2016_4km.tif"
src = rasterio.open(raster,'r')
left, bottom, right, top = src.bounds
raster = rasterio.mask.mask(src, shdf.geometry, crop=False)[0][0]
np.unique(raster)
raster = np.ma.masked_where(\
~np.isin(raster, list(init.thresh["extreme"].keys())), \
raster)
cmap, norm = matplotlib.colors.from_levels_and_colors([0,50,55,100],['darkgreen','darkgoldenrod','lime'])
ax.imshow(raster,
cmap=cmap, norm = norm,extent=(left, right, top, bottom), alpha = 0.4
)
def flatten(t):
return [item for sublist in t for item in sublist]
def reshape(row):
cols = ["lfmc","fire","lc","mean_ndvi","vpd_4m","agb","mean_t","wind"]
select = [[f"{col}_0", f"{col}_1"] for col in cols]
select = flatten(select)
if len(row) == 2:
order = np.argsort(row.lfmc)
new_row = [list(np.array(row[col])[order]) for col in cols]
new_row = flatten(new_row)
new_row = pd.Series(data = new_row, index = select)
# new_row = pd.Series(data = list(np.array(row.lfmc)[order]) +\
# list(np.array(row.y)[order]) +\
# list(np.array(row.lc)[order]),\
# index = ["lfmc_0","lfmc_1","fire_0","fire_1", "lc_0","lc_1"])
return new_row
else:
return pd.Series(data = [np.nan]*len(select), \
index = select)
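# Hedged, hypothetical illustration of reshape on a single matched pair: the member
# with the lower LFMC lands in the *_0 columns, its partner in the *_1 columns, and
# groups that are not exactly pairs come back as an all-NaN Series.
#
#   pair = pd.DataFrame({'lfmc': [120., 80.], 'fire': [0, 1], 'lc': [52, 52],
#                        'mean_ndvi': [0.4, 0.3], 'vpd_4m': [2.1, 2.5],
#                        'agb': [30., 25.], 'mean_t': [295., 300.], 'wind': [3., 4.]})
#   reshape(pair)  # -> Series with lfmc_0 = 80.0, lfmc_1 = 120.0, fire_0 = 1, fire_1 = 0, ...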
def get_or_stats(df):
table = sklearn.metrics.confusion_matrix(\
np.append(df.fire_0.values,df.fire_1.values), \
np.append(np.repeat(1,len(df)),np.repeat(0,len(df))))
model = statsmodels.stats.contingency_tables.Table2x2(table)
return [model.oddsratio, model.oddsratio_confint()[1], model.oddsratio_confint()[0], model.oddsratio_pvalue(), 2*len(df)]
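# Quick sketch (hedged) of how get_or_stats is used: given a matched DataFrame with
# fire_0 / fire_1 outcome columns, it cross-tabulates outcome against the treatment
# indicator and returns [odds ratio, upper CI, lower CI, p-value, 2 * number of pairs].
#
#   toy = pd.DataFrame({'fire_0': [1, 1, 0, 1], 'fire_1': [0, 0, 0, 1]})
#   oddsratio, upper, lower, pval, n = get_or_stats(toy)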
def plot_odds(ms = 100):
df = pd.read_csv(os.path.join(init.dir_root,\
"data","r",\
"matched_extreme_21_apr_2022.csv"))
df = df.rename(columns = {"y":"fire"})
cols = ["lfmc","fire","lc","mean_ndvi","vpd_4m","agb","mean_t","wind"]
mdf = df.groupby("match")[cols].apply(reshape)
mdf["lfmc_diff"] = mdf["lfmc_0"] - mdf["lfmc_1"]
mdf.dropna(inplace = True)
mdf["thresh"] = list(map(init.thresh["extreme"].get, mdf.lc_0))
fig = plt.figure(constrained_layout=True, figsize =(5.5,3.5))
widths = [4,3]
heights = [1,1]
spec = fig.add_gridspec(ncols=2, nrows=1, width_ratios=widths,
)
axs = []
axs.append(fig.add_subplot(spec[0, 0]))
axs.append(fig.add_subplot(spec[0, 1], sharey = axs[0]))
ax = axs[1]
or_by_lfmc = pd.DataFrame(index = init.thresh_diff.keys(), columns = ["Odds ratio", "upper","lower","p","n"])
for cat in init.thresh_diff.keys():
sdf = mdf.loc[(mdf.lfmc_diff > init.thresh_diff[cat][0])&(mdf.lfmc_diff <= init.thresh_diff[cat][1])].copy()
print(sdf.shape)
or_by_lfmc.loc[cat,:] = get_or_stats(sdf)
ax.errorbar(or_by_lfmc.index, or_by_lfmc["Odds ratio"], \
yerr = [or_by_lfmc["Odds ratio"] - or_by_lfmc["lower"],or_by_lfmc["upper"] - or_by_lfmc["Odds ratio"]], color = "grey", \
capsize = 3, zorder = -1)
ax.scatter(or_by_lfmc.index, or_by_lfmc["Odds ratio"], color = "k", edgecolor = "grey", s = ms)
ax.axhline(1, linestyle = "--",color = "grey")
ax.set_ylabel("")
ax.set_xlabel(r"$\Delta$ LFMC in matched pair")
new_labs = [f"({init.thresh_diff[cat][1]}, {init.thresh_diff[cat][0]}]" for cat in init.thresh_diff.keys()]
# ax.set_xlabel("Range of min. LFMC")
ax.set_xticklabels(new_labs)
ax = axs[0]
mdf = mdf.replace({"lc_0": init.lc_dict})
or_by_lc = pd.DataFrame(index =["All"] + list(mdf.lc_0.unique()) , columns = ["Odds ratio", "upper","lower","p","n"])
for cat in or_by_lc.index:
if cat == "All":
sdf = mdf.copy()
else:
sdf = mdf.loc[mdf.lc_0==cat].copy()
or_by_lc.loc[cat,:] = get_or_stats(sdf)
ax = axs[0]
ax.errorbar(or_by_lc.index, or_by_lc["Odds ratio"], \
yerr = [or_by_lc["Odds ratio"] - or_by_lc["lower"],\
or_by_lc["upper"] - or_by_lc["Odds ratio"]], \
color = "grey", \
capsize = 3, zorder = -1, ls='none')
ax.scatter(or_by_lc.index, or_by_lc["Odds ratio"], \
color = ['black', 'forestgreen','darkgoldenrod','lawngreen'],\
edgecolor = "grey", s = ms)
ax.set_ylim(0,12)
ax.axhline(1, linestyle = "--",color = "grey")
ax.set_ylabel("")
ax.set_xlabel("")
ax.set_ylabel("Odds ratio")
axs[0].annotate("A",xy = (-0.25,1.2), xycoords = "axes fraction")
axs[0].annotate("Odds ratio per land cover",xy = (0.5,1.1), \
xycoords = "axes fraction", weight = "bold", ha = "center")
axs[0].annotate("B",xy = (1,1.2), xycoords = "axes fraction")
axs[1].annotate(r"Odds ratio binned by $\Delta$ LFMC ",xy = (0.5,1.1), \
xycoords = "axes fraction", weight = "bold", ha = "center")
axs[0].spines['right'].set_visible(False)
axs[0].spines['top'].set_visible(False)
axs[1].spines['right'].set_visible(False)
axs[1].spines['top'].set_visible(False)
def plot_odds_separate_files(ms = 100):
cols = ["lfmc","fire","lc","mean_ndvi","vpd_4m","agb","mean_t","wind"]
or_by_lc = pd.DataFrame(index =["All","Shrub","Forest","Grass"] , columns = ["Odds ratio", "upper","lower","p","n"])
mmdf = pd.DataFrame()
for cat in or_by_lc.index:
if cat != "All":
sdf = pd.read_csv(os.path.join(init.dir_root,\
"data","r","matches",\
f"matched_22_apr_2022_extreme_{cat.lower()}.csv"))
sdf = sdf.rename(columns = {"y":"fire"})
mdf = sdf.groupby("match")[cols].apply(reshape)
or_by_lc.loc[cat,:] = get_or_stats(mdf)
mmdf = mmdf.append(mdf, ignore_index = True)
or_by_lc.loc["All",:] = get_or_stats(mmdf)
mmdf["lfmc_diff"] = mmdf["lfmc_0"] - mmdf["lfmc_1"]
or_by_lfmc = pd.DataFrame(index = init.thresh_diff.keys(), columns = ["Odds ratio", "upper","lower","p","n"])
for cat in init.thresh_diff.keys():
mdf = mmdf.loc[(mmdf.lfmc_diff > init.thresh_diff[cat][0])&\
(mmdf.lfmc_diff <= init.thresh_diff[cat][1])].copy()
or_by_lfmc.loc[cat,:] = get_or_stats(mdf)
fig, axs = plt.subplots(1, 2, figsize =(8,4), sharey = True)
ax = axs[0]
ax.errorbar(or_by_lc.index, or_by_lc["Odds ratio"], \
yerr = [or_by_lc["Odds ratio"] - or_by_lc["lower"],\
or_by_lc["upper"] - or_by_lc["Odds ratio"]], \
color = "grey", \
capsize = 3, zorder = -1, ls='none')
ax.scatter(or_by_lc.index, or_by_lc["Odds ratio"], \
color = ['black', 'darkgoldenrod','forestgreen','lawngreen'],\
edgecolor = "grey", s = ms)
ax.set_ylim(0,8)
ax.axhline(1, linestyle = "--",color = "grey")
ax.set_ylabel("")
ax.set_xlabel("")
ax.set_ylabel("Odds ratio")
ax = axs[1]
ax.errorbar(or_by_lfmc.index, or_by_lfmc["Odds ratio"], \
yerr = [or_by_lfmc["Odds ratio"] - or_by_lfmc["lower"],or_by_lfmc["upper"] - or_by_lfmc["Odds ratio"]], color = "grey", \
capsize = 3, zorder = -1)
ax.scatter(or_by_lfmc.index, or_by_lfmc["Odds ratio"], color = "k", edgecolor = "grey", s = ms)
ax.axhline(1, linestyle = "--",color = "grey")
ax.set_ylabel("")
ax.set_xlabel(r"$\Delta$ LFMC in matched pairs")
new_labs = [f"({int(init.thresh_diff[cat][1])}, {int(init.thresh_diff[cat][0])}]" \
for cat in init.thresh_diff.keys()]
# ax.set_xlabel("Range of min. LFMC")
ax.set_xticklabels(new_labs)
axs[0].annotate("A",xy = (-0.25,1.2), xycoords = "axes fraction")
axs[0].annotate("Odds ratio per land cover",xy = (0.5,1.1), \
xycoords = "axes fraction", weight = "bold", ha = "center")
axs[0].annotate("B",xy = (1,1.2), xycoords = "axes fraction")
axs[1].annotate(r"Odds ratio binned by $\Delta$ LFMC ",xy = (0.5,1.1), \
xycoords = "axes fraction", weight = "bold", ha = "center")
axs[0].spines['right'].set_visible(False)
axs[0].spines['top'].set_visible(False)
axs[1].spines['right'].set_visible(False)
axs[1].spines['top'].set_visible(False)
for i, (lc, cat) in enumerate(zip(or_by_lc.index, or_by_lfmc.index)):
delta = 0
align = "center"
if i==0:
align = "left"
delta = -0.1
elif i ==3:
align = "right"
delta = 0.1
axs[0].annotate(f"n = {or_by_lc.loc[lc, 'n']:,}", \
xy = (i+delta,-0.1), \
ha = align)
# axs[1].annotate(f"n = {or_by_lfmc.loc[cat, 'n']:,}", \
# xy = (i+delta,or_by_lfmc.loc[cat, "upper"]+0.3), \
# ha = align)
print(or_by_lc)
print(or_by_lfmc)
def plot_balance():
df = pd.read_csv(os.path.join(init.dir_root, "data","r","balance_16_mar_2022.csv"), index_col = 0)
df.index = df.index.map(init.var_names)
df = df.rename(columns = {"results.std.diff.Unadj":"Raw","results.std.diff.ms.1":"Matched"})
print(df["Matched"].abs().mean())
fig, ax = plt.subplots(figsize = (3,3.5))
ax.scatter(df.Raw, df.index, marker = "s",s = 40, color = "k")
ax.scatter(df.Matched, df.index, marker = "o",s = 40, color = "k")
ax.axvline(0, color = "k")
for i in range(len(df.index)):
if df.Matched[i]-df.Raw[i]>0:
ax.arrow(df.Raw[i], i, df.Matched[i]-df.Raw[i]-0.2, 0,
head_width = 0.1, color = "k", linewidth =0.2)
else:
ax.arrow(df.Raw[i], i, df.Matched[i]-df.Raw[i]+0.17, 0,
head_width = 0.1, color = "k", linewidth =0.2)
ax.yaxis.set_ticks_position('none')
ax.set_xlabel("Standardized difference")
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_visible(False)
def plot_balance_separate_files():
lcs = ["shrub","forest","grass"]
df = pd.DataFrame()
for lc in lcs:
sdf = pd.read_csv(os.path.join(init.dir_root, "data","r","balance",\
f"balance_24_apr_2022_{lc}.csv"), index_col = 0)
sdf.index = sdf.index.map(init.var_names)
sdf = sdf.rename(columns = {"results.std.diff.Unadj":"Raw","results.std.diff.ms.1":"Matched"})
sdf= sdf[["Raw","Matched"]]
sdf["n"] = pd.read_csv(os.path.join(init.dir_root,\
"data","r","matches",\
f"matched_22_apr_2022_extreme_{lc}.csv")).shape[0]
sdf.columns = [col + f"_{lc}" for col in sdf.columns]
df = df.join(sdf, how = "outer")
for col in ["Raw","Matched"]:
df[col] = (df[f"{col}_shrub"]*df["n_shrub"]+\
df[f"{col}_forest"]*df["n_forest"]+\
df[f"{col}_grass"]*df["n_grass"])/\
(df["n_shrub"]+df["n_forest"]+df["n_grass"])
print(df["Matched"].abs().mean())
fig, ax = plt.subplots(figsize = (3,3.5))
ax.scatter(df.Raw, df.index, marker = "s",s = 40, color = "k")
ax.scatter(df.Matched, df.index, marker = "o",s = 40, color = "k")
ax.axvline(0, color = "k")
for i in range(len(df.index)):
if df.Matched[i]-df.Raw[i]>0:
ax.arrow(df.Raw[i], i, df.Matched[i]-df.Raw[i]-0.2, 0,
head_width = 0.1, color = "k", linewidth =0.2)
else:
ax.arrow(df.Raw[i], i, df.Matched[i]-df.Raw[i]+0.17, 0,
head_width = 0.1, color = "k", linewidth =0.2)
ax.yaxis.set_ticks_position('none')
ax.set_xlabel("Standardized difference")
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.set_xlim(-1.5,1.5)
def plot_gamma():
fig, ax = plt.subplots(figsize = (3,3))
for lc in ["all"]:
sdf = pd.read_csv(os.path.join(init.dir_root,\
"data","r","pvalues",\
f"p_gamma_24_apr_2022_{lc}.csv"), usecols = [1,2],index_col = 0)
sdf.plot(ax = ax, color = init.lc_color[lc], linewidth = 1, legend = False)
x = sdf.index[(sdf['pvalue']-0.05).abs().argmin()]
ax.hlines(0.05,1,x, color = "grey")
ax.vlines(x,0,0.05, color = "grey")
ax.set_ylabel("P value")
ax.set_xlabel("Confounding ratio")
ax.set_xlim(1,1.6)
ax.set_ylim(0,0.15)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.annotate(f"{x:0.2f}", xy = (x, 0.005))
def confusion_matrix():
df = pd.read_csv(os.path.join(os.path.join(init.dir_root, "data","r","rct_22_apr_2022.csv")))
df = sklearn.metrics.confusion_matrix(df.fire, df.z_extreme)
print(df)
print(sum(sum(df)))
# sdf = df.loc[df.lc==71]
# print(sklearn.metrics.confusion_matrix(sdf.fire, sdf.z_high))
# df = pd.read_csv(os.path.join(init.dir_root,\
# "data","r",\
# "matched_extreme_3_mar_2022.csv"))
df = np.zeros((2,2))
for lc in ["Shrub","Forest","Grass"]:
sdf = pd.read_csv(os.path.join(init.dir_root,\
"data","r","matches",\
f"matched_22_apr_2022_extreme_{lc.lower()}.csv"))
sdf = sklearn.metrics.confusion_matrix(sdf.y, sdf.z_extreme)
print(lc)
print(sdf)
df += sdf
print(df)
print(sum(sum(df)))
def plot_my_balance():
df = pd.read_csv(os.path.join(os.path.join(init.dir_root, "data","r","rct_22_apr_2022.csv")))
rows = ['longitude','latitude','agb','wind','vpd_4m','ppt_1y']
balance_raw = pd.pivot_table(df, values = rows, columns = ["z_extreme"])
std = df[rows].std()
balance_raw = (balance_raw[1] - balance_raw[0])/std
df = pd.read_csv(os.path.join(init.dir_root,\
"data","r",\
"matched_extreme_21_apr_2022.csv"))
balance_matched = pd.pivot_table(df, values = rows, columns = ["z_extreme"])
balance_matched = (balance_matched[1] - balance_matched[0])/std
df = pd.DataFrame({"Raw":balance_raw, "Matched":balance_matched})
# df = df.rename(index = {'longitude':'Longitude', 'latitude':'Latitude', 'vpd_4m':'VPD$_{\rm 4\ months\ mean}$',\
# 'ppt_1y':'P$_{\rm 12\ months\ sum}$', 'agb':'AGB', 'wind':'Wind speed'})
fig, ax = plt.subplots(figsize = (3,3.5))
ax.scatter(df.Raw, df.index, marker = "s",s = 40, color = "k")
ax.scatter(df.Matched, df.index, marker = "o",s = 40, color = "k")
ax.axvline(0, color = "k")
for i in range(len(df.index)):
if df.Matched[i]-df.Raw[i]>0:
ax.arrow(df.Raw[i], i, df.Matched[i]-df.Raw[i]-0.2, 0,
head_width = 0.1, color = "k", linewidth =0.2)
else:
ax.arrow(df.Raw[i], i, df.Matched[i]-df.Raw[i]+0.17, 0,
head_width = 0.1, color = "k", linewidth =0.2)
ax.yaxis.set_ticks_position('none')
ax.set_xlabel("Standardized difference")
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_visible(False)
df = pd.DataFrame()
import os
import sys
sys.path.append('../')
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
from absl import flags, app
from tensorflow_probability.python.internal.samplers import split_seed
from tqdm import tqdm
from filterflow.base import State
from filterflow.models.optimal_proposal_linear_gaussian import make_filter, make_optimal_filter
from filterflow.resampling import MultinomialResampler, SystematicResampler, StratifiedResampler, RegularisedTransform
from filterflow.resampling.criterion import NeverResample, AlwaysResample, NeffCriterion
from filterflow.resampling.differentiable import PartiallyCorrectedRegularizedTransform
from filterflow.resampling.differentiable.loss import SinkhornLoss
from filterflow.resampling.differentiable.optimized import OptimizedPointCloud
from filterflow.resampling.differentiable.optimizer.sgd import SGD
import pickle
from scripts.optimal_proposal_common import get_data, ResamplingMethodsEnum, get_observation_matrix, \
get_observation_covariance, get_transition_covariance, get_transition_matrix
def pickle_obj(obj, file_path):
with open(file_path, 'wb') as handle:
pickle.dump(obj, handle, protocol=pickle.HIGHEST_PROTOCOL)
@tf.function
def routine(pf, initial_state, observations_dataset, T, log_phi_x, phi_y, seed):
with tf.GradientTape() as tape:
tape.watch([log_phi_x, phi_y])
final_state = pf(initial_state, observations_dataset, T, seed=seed, return_final=True)
res = -tf.reduce_mean(final_state.log_likelihoods)
return res, tape.gradient(res, [log_phi_x, phi_y]), tf.reduce_mean(final_state.ess)
def get_gradient_descent_function():
# This is a trick because tensorflow doesn't allow you to create variables inside a decorated function
@tf.function
def gradient_descent(pf, initial_state, observations_dataset, T, n_iter, optimizer, log_phi_x, phi_y,
initial_values, change_seed, seed):
variables = [log_phi_x, phi_y]
reset_operations = [k.assign(v) for k, v in zip(variables, initial_values)]
loss = tf.TensorArray(dtype=tf.float32, size=n_iter, dynamic_size=False)
ess = tf.TensorArray(dtype=tf.float32, size=n_iter, dynamic_size=False)
filter_seed, seed = split_seed(seed, n=2, salt='gradient_descent')
with tf.control_dependencies(reset_operations):
for i in tf.range(n_iter):
loss_value, grads, average_ess = routine(pf, initial_state, observations_dataset, T, log_phi_x, phi_y,
seed)
if change_seed:
filter_seed, seed = split_seed(filter_seed, n=2)
loss = loss.write(tf.cast(i, tf.int32), loss_value)
ess = ess.write(tf.cast(i, tf.int32), average_ess)
grads = [tf.clip_by_value(grad, -100., 100.) for grad in grads]
optimizer.apply_gradients(zip(grads, variables))
tf.print('\rStep', i, '/', n_iter, end='')
return [tf.convert_to_tensor(var) for var in variables], loss.stack(), ess.stack()
return gradient_descent
def compare_learning_rates(pf, initial_state, observations_dataset, T, log_phi_x, phi_y, initial_values,
n_iter, optimizer_maker, learning_rates, filter_seed, use_xla, change_seed):
loss_profiles = []
ess_profiles = []
for learning_rate in tqdm(learning_rates):
optimizer = optimizer_maker(learning_rate=learning_rate)
gradient_descent_function = get_gradient_descent_function()
final_variables, loss_profile, ess_profile = gradient_descent_function(pf, initial_state, observations_dataset,
T, n_iter,
optimizer, log_phi_x, phi_y,
initial_values, change_seed, filter_seed)
loss_profiles.append(-loss_profile.numpy() / T)
ess_profiles.append(ess_profile.numpy())
return loss_profiles, ess_profiles
def plot_losses_vs_ess(loss_profiles_df, ess_profiles_df, filename, savefig, dx, dy, dense, T, n_particles, change_seed,
batch_size, optimal_filter_val, kalman_val, n_iter, mse_table, n_data):
fig, ax = plt.subplots(figsize=(5, 3))
loss_profiles_df.style.float_format = '${:,.1f}'.format
loss_profiles_df.plot(ax=ax, legend=False)
ax.axhline(y=optimal_filter_val, color="k", linestyle=':')
ax.axhline(y=kalman_val, color="k")
ax.set_xlim(0, n_iter)
ax1 = ax.twinx()
ess_profiles_df.plot.area(ax=ax1, legend=False, linestyle='--', alpha=0.33, stacked=False)
# ax.set_ylim(-2.5, -1.7)
ax1.set_ylim(1, n_particles)
csv_fp = os.path.join('./charts/',
f'global_variational_different_loss_df_lr_loss_{filename}_dx_{dx}_dy_{dy}_dense_{dense}_T_{T}_change_seed_{change_seed}.csv')
loss_profiles_df.to_csv(csv_fp)
csv_fp = os.path.join('./charts/',
f'global_variational_different_ess_df_lr_loss_{filename}_dx_{dx}_dy_{dy}_dense_{dense}_T_{T}_change_seed_{change_seed}.csv')
ess_profiles_df.to_csv(csv_fp)
# ax.legend()
fig.tight_layout()
filename = f'global_variational_different_lr_loss_ess_{filename}_N_{n_particles}_dx_{dx}_dy_{dy}_dense_{dense}_T_{T}_change_seed_{change_seed}_batch_size_{batch_size}_ndata_{n_data}'
if savefig:
fig.savefig(os.path.join('./charts/',
filename + '.png'))
mse_table.to_csv(os.path.join('./tables/', filename + '.csv'),
float_format='%.5f')
else:
print(mse_table)
fig.suptitle(f'variational_different_loss_ess_{filename}_dx_{dx}_dy_{dy}_dense_{dense}_T_{T}')
plt.show()
def plot_variables(variables_df, filename, savefig):
fig, ax = plt.subplots(figsize=(5, 5))
variables_df.plot(ax=ax)
fig.tight_layout()
if savefig:
fig.savefig(os.path.join('./charts/', f'global_variational_different_lr_variables_{filename}.png'))
else:
fig.suptitle(f'variational_different_lr_variables_{filename}')
plt.show()
def resampling_method_factory(resampling_method_enum, resampling_kwargs):
if resampling_method_enum == ResamplingMethodsEnum.MULTINOMIAL:
resampling_method = MultinomialResampler()
elif resampling_method_enum == ResamplingMethodsEnum.SYSTEMATIC:
resampling_method = SystematicResampler()
elif resampling_method_enum == ResamplingMethodsEnum.STRATIFIED:
resampling_method = StratifiedResampler()
elif resampling_method_enum == ResamplingMethodsEnum.REGULARIZED:
resampling_method = RegularisedTransform(**resampling_kwargs)
elif resampling_method_enum == ResamplingMethodsEnum.VARIANCE_CORRECTED:
regularized_resampler = RegularisedTransform(**resampling_kwargs)
resampling_method = PartiallyCorrectedRegularizedTransform(regularized_resampler)
elif resampling_method_enum == ResamplingMethodsEnum.OPTIMIZED:
lr = resampling_kwargs.pop('lr', resampling_kwargs.pop('learning_rate', 0.1))
loss = SinkhornLoss(**resampling_kwargs, symmetric=True)
optimizer = SGD(loss, lr=lr, decay=0.95)
regularized_resampler = RegularisedTransform(**resampling_kwargs)
resampling_method = OptimizedPointCloud(optimizer, intermediate_resampler=regularized_resampler)
else:
raise ValueError(f'resampling_method_name {resampling_method_enum} is not a valid ResamplingMethodsEnum')
return resampling_method
def main(resampling_method_value, resampling_neff, learning_rates=(1e-4, 1e-3), resampling_kwargs=None,
alpha=0.42, dx=10, dy=3, observation_covariance=1., dense=False, T=20, batch_size=1, n_particles=25,
data_seed=0, n_data=50, n_iter=50, savefig=False, filter_seed=0, use_xla=False, change_seed=True):
transition_matrix = get_transition_matrix(alpha, dx)
transition_covariance = get_transition_covariance(dx)
observation_matrix = get_observation_matrix(dx, dy, dense)
observation_covariance = get_observation_covariance(observation_covariance, dy)
resampling_method_enum = ResamplingMethodsEnum(resampling_method_value)
np_random_state = np.random.RandomState(seed=data_seed)
observation_matrix = tf.convert_to_tensor(observation_matrix)
transition_covariance_chol = tf.linalg.cholesky(transition_covariance)
observation_covariance_chol = tf.linalg.cholesky(observation_covariance)
initial_particles = np_random_state.normal(0., 1., [batch_size, n_particles, dx]).astype(np.float32)
initial_state = State(initial_particles)
if resampling_neff == 0.:
resampling_criterion = NeverResample()
elif resampling_neff == 1.:
resampling_criterion = AlwaysResample()
else:
resampling_criterion = NeffCriterion(resampling_neff, True)
optimal_smc = make_optimal_filter(observation_matrix, transition_matrix, observation_covariance_chol,
transition_covariance_chol, MultinomialResampler(), resampling_criterion)
if resampling_kwargs is None:
resampling_kwargs = {}
resampling_method = resampling_method_factory(resampling_method_enum, resampling_kwargs)
datas = []
lls = []
observation_datasets = []
optimal_lls = []
log_phi_x_0 = tf.ones(dx)
phi_y_0 = tf.zeros(dy)
for _ in range(n_data):
data, ll = get_data(transition_matrix, observation_matrix, transition_covariance, observation_covariance, T,
np_random_state)
datas.append(data)
lls.append(ll / T)
observation_dataset = tf.data.Dataset.from_tensor_slices(data)
observation_datasets.append(observation_dataset)
final_state = optimal_smc(initial_state, observation_dataset, T, None, True, filter_seed)
optimal_lls.append(final_state.log_likelihoods.numpy().mean() / T)
log_phi_x = tf.Variable(log_phi_x_0, trainable=True)
phi_y = tf.Variable(phi_y_0, trainable=True)
smc = make_filter(observation_matrix, transition_matrix, observation_covariance_chol,
transition_covariance_chol, resampling_method, resampling_criterion,
log_phi_x, phi_y)
def optimizer_maker(learning_rate):
# tf.function doesn't like creating variables. This is a way to create them outside the graph
# We can't reuse the same optimizer because it would be giving a warmed-up momentum to the ones run later
optimizer = tf.optimizers.Adam(learning_rate=learning_rate)
return optimizer
initial_values = [log_phi_x_0, phi_y_0]
losses_list = []
ess_profiles_list = []
mean_errors = []
for observation_dataset in observation_datasets:
try:
losses, ess_profiles = compare_learning_rates(smc, initial_state, observation_dataset, T, log_phi_x, phi_y,
initial_values, n_iter, optimizer_maker, learning_rates,
filter_seed,
use_xla, change_seed)
except:
print('one dataset failed, ignoring')
continue
losses_df = pd.DataFrame(np.stack(losses).T, columns=np.log10(learning_rates))
ess_df = pd.DataFrame(np.stack(ess_profiles).T, columns=np.log10(learning_rates))
losses_df.columns.name = 'log learning rate'
losses_df.columns.epoch = 'epoch'
ess_df.columns.name = 'log learning rate'
ess_df.columns.epoch = 'epoch'
losses_list.append(losses_df)
ess_profiles_list.append(ess_df)
delta_phi_m_1 = tf.linalg.diag(tf.exp(-log_phi_x))
diff_cov = optimal_smc._proposal_model._sigma - delta_phi_m_1 @ transition_covariance
approx_error = tf.linalg.diag_part(diff_cov).numpy()
mean_error = np.sqrt(np.nanmean(approx_error ** 2))
mean_errors.append(mean_error)
losses_data = pd.concat(losses_list, axis=1)
ess_data = pd.concat(ess_profiles_list, axis=1)
def parsedx(filename) :
"""PARSEDX
---------------------------------------------------------------------------
DESCRIPTION
Parsedx accepts an Excel file containing strings recorded from a Dillon
EDXtreme dynamometer and returns data parsed into separate datetime,
elapsed seconds, instantaneous force, and peak force columns. The strings
are printed by the Dillon EDXtreme in continuous recording mode in #4
format with the dynamometer connected to a computer by a serial port
connection and WedgeLink software.
INPUTS
filename: string - filepath of Excel file containing original strings
OUTPUTS
B: DataFrame - m x 4 DataFrame containing datetime, elapsed seconds,
instantaneous force, and peak force records in separate columns
---------------------------------------------------------------------------
"""
import pandas as pd
import re
import matplotlib.pyplot as plt
df=pd.read_excel(filename,sheet_name=0,header=None)
df.columns = ['strings']
dtstrings = [re.search(r'\d*\s\w*\s\d{4}[\,]\d*\:\d{2}\:\d{2}', string) for string in df.strings]
dt = [pd.to_datetime(dtstring.group(),format='%d %b %Y,%H:%M:%S') for dtstring in dtstrings]
t = [(dts - dt[0]).total_seconds() for dts in dt]
fstrings = [re.findall(r'\d*\.\d{1}', string) for string in df.strings]
f_ins = []
f_pk = []
for b in fstrings :
f_ins.append(pd.to_numeric(b[0]))
f_pk.append(pd.to_numeric(b[1]))
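# Hedged illustration of the two regexes above on a hypothetical WedgeLink string
# (the exact Dillon #4 layout may differ from this made-up example):
#
#   s = '12 Mar 2021,10:15:30  45.6 lbf  78.9 lbf peak'
#   re.search(r'\d*\s\w*\s\d{4}[\,]\d*\:\d{2}\:\d{2}', s).group()  # '12 Mar 2021,10:15:30'
#   re.findall(r'\d*\.\d{1}', s)                                   # ['45.6', '78.9']
#
# so f_ins would pick up the instantaneous force (45.6) and f_pk the peak force (78.9).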
###################################################################################################
# Repository: https://github.com/lgervasoni/urbansprawl
# MIT License
###################################################################################################
import osmnx as ox
import pandas as pd
import geopandas as gpd
import numpy as np
from shapely.geometry import Polygon
from osmnx.utils import log
from .osm_tags import height_tags, activity_classification
from .osm_data import aggregate_classification
############################################
### Land uses surface association
############################################
def get_composed_classification(building, df_pois):
"""
Retrieve the composed classification of a building, given the Points of Interest it contains
Parameters
----------
building : geopandas.GeoSeries
input building
df_pois : geopandas.GeoDataFrame
Points of Interest contained in the building
Returns
----------
geopandas.GeoSeries
returns a composed classification building
"""
# POIs aggregated classification
pois_classification = aggregate_classification( df_pois.classification.values )
# Composed building-POIs classification
composed_classification = aggregate_classification( [building.classification, pois_classification] )
# Composed activity categories
try:
composed_activity_category = list( set( [element for list_ in df_pois.activity_category for element in list_] + building.activity_category ) )
except: # df_pois.activity_category.isnull().all() Returns True
composed_activity_category = building.activity_category
# Create a Series for a new row with composed classification
composed_row = pd.Series([building.geometry, composed_classification, composed_activity_category, building.building_levels], index=["geometry", "classification", "activity_category", "building_levels"])
import json
import pandas
# merge duplicate and equal categories
# remove some corner topics
dataset = []
for line in open('data/News_Category_Dataset_v2.json', 'r'):
dataset.append(json.loads(line))
newDataset = []
for index, item in enumerate(dataset):
category = item['category']
# remove spams
# MONEY! TECH
if category == "PARENTING" or \
category == "PARENTS" or \
category == "RELIGION" or \
category == "ENVIRONMENT" or \
category == "FIFTY" or \
category == "GREEN" or \
category == "MONEY":
continue
# normalize categories
if category == "WELLNESS" or category == "HEALTHY LIVING":
item['category'] = "HEALTHY"
elif category == "STYLE":
item['category'] = "STYLE & BEAUTY"
elif category == "TASTE":
item['category'] = "FOOD & DRINK"
elif category == "ARTS" or category == "ARTS & CULTURE" or category == "CULTURE & ARTS":
item['category'] = "ARTS & CULTURE"
elif category == "COLLEGE" or category == "EDUCATION" or category == "SCIENCE":
item['category'] = "EDUCATION & SCIENCE"
elif category == "QUEER VOICES" or \
category == "THE WORLDPOST" or \
category == "WEIRD NEWS" or \
category == "WORLDPOST" or \
category == "BLACK VOICES" or \
category == "WORLD NEWS" or \
category == "GOOD NEWS" or \
category == "LATINO VOICES":
item['category'] = "NEWS"
newDataset.append(item)
pd.DataFrame(newDataset)
from pathlib import Path
import pandas as pd
import numpy as np
from matplotlib.font_manager import FontProperties
import os, sys, inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
grandpadir = os.path.dirname(os.path.dirname(currentdir))
sys.path.insert(0, grandpadir)
from collections import OrderedDict
from utils.helper_functions import read_nav_files, sort_files_by_dim
from analysis.comparison.comparison_utils import get_dataset_name, read_proteus_files, read_baseline_files, reform_pseudo_samples_dict
from utils.pseudo_samples import PseudoSamplesMger
from utils.shared_names import FileKeys, FileNames
import matplotlib.pyplot as plt
import statsmodels.api as sm
pipeline_grouping = 'results_predictive_grouping'
pipeline_no_grouping = 'results_predictive'
expl_size = 10
noise_level = None
keep_only_prot_fs = False
datasets = {
'wbc',
'ionosphere',
'arrhythmia'
}
# test_confs = [
# {'path': Path('..', pipeline, 'loda'), 'detector': 'loda', 'type': 'test'},
# # {'path': Path('..', pipeline, 'iforest'), 'detector': 'iforest', 'type': 'test'}
# ]
synth_confs =[
{'path': Path('..', pipeline_grouping, 'iforest'), 'detector': 'iforest', 'type': 'synthetic'},
{'path': Path('..', pipeline_grouping, 'lof'), 'detector': 'lof', 'type': 'synthetic'},
{'path': Path('..', pipeline_grouping, 'loda'), 'detector': 'loda', 'type': 'synthetic'}
]
real_confs = [
{'path': Path('..', pipeline_grouping, 'iforest'), 'detector': 'iforest', 'type': 'real'},
{'path': Path('..', pipeline_grouping, 'lof'), 'detector': 'lof', 'type': 'real'},
{'path': Path('..', pipeline_grouping, 'loda'), 'detector': 'loda', 'type': 'real'}
]
synth_confs_no_grouping = [
{'path': Path('..', pipeline_no_grouping, 'iforest'), 'detector': 'iforest', 'type': 'synthetic'},
{'path': Path('..', pipeline_no_grouping, 'lof'), 'detector': 'lof', 'type': 'synthetic'},
{'path': Path('..', pipeline_no_grouping, 'loda'), 'detector': 'loda', 'type': 'synthetic'}
]
confs_to_analyze = synth_confs
def plot_panels():
synth_no_grouping = unstructured_perfs(synth_confs_no_grouping)
synth_grouping = structured_perfs(synth_confs)
real_grouping = unstructured_perfs(real_confs)
bias_plot(synth_grouping, real_grouping, synth_no_grouping)
# test_auc_plot(pred_perfs_dict, 0)
# test_auc_plot(pred_perfs_dict, 1)
def best_models(conf):
best_models_perf_in_sample = pd.DataFrame()
cv_estimates = pd.DataFrame()
ci_in_sample = pd.DataFrame()
error_in_sample = pd.DataFrame()
best_models_perf_out_of_sample = pd.DataFrame()
dataset_names = []
nav_files_json = sort_files_by_dim(read_nav_files(conf['path'], conf['type']))
for dim, nav_file in nav_files_json.items():
real_dims = dim - 1 - (conf['type'] == 'synthetic')
dname = get_dataset_name(nav_file[FileKeys.navigator_original_dataset_path], conf['type'] != 'real')
print(dname + ' ' + str(real_dims) + 'd')
rel_fratio = '(' + str(int(round((dim-5)/dim, 2) * 100)) + '%)' if conf['type'] != 'real' else ''
dataset_names.append(dname + ' ' + str(real_dims) + 'd ' + rel_fratio)
# time_df = pd.concat([time_df, get_time_per_method(nav_file)], axis=1)
best_models_perf_in_sample_curr, ci_in_sample_curr, err_in_sample_curr, cv_estimates_curr = \
get_best_models_perf_per_method(nav_file, True)
best_models_perf_in_sample = pd.concat([best_models_perf_in_sample, best_models_perf_in_sample_curr], axis=1)
cv_estimates = pd.concat([cv_estimates, cv_estimates_curr], axis=1)
# --------------
import pandas as pd
import scipy.stats as stats
import math
import numpy as np
import warnings
warnings.filterwarnings('ignore')
#Sample_Size
sample_size=2000
#Z_Critical Score
z_critical = stats.norm.ppf(q = 0.95)
# path [File location variable]
#Code starts here
data= pd.read_csv(path)
data_sample=data.sample(n=sample_size, random_state=0)
sample_mean= data_sample.installment.mean()
sample_std= data_sample.installment.std()
margin_of_error= z_critical*(sample_std/math.sqrt(sample_size))
confidence_interval= [(sample_mean-margin_of_error), (sample_mean+margin_of_error)]
true_mean= data.installment.mean()
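# Optional sanity check (a hedged addition, not part of the original exercise):
# does the interval around the sample mean cover the true population mean?
print('margin of error:', round(margin_of_error, 2))
print('true mean inside CI:', confidence_interval[0] <= true_mean <= confidence_interval[1])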
# --------------
import matplotlib.pyplot as plt
import numpy as np
#Different sample sizes to take
sample_size=np.array([20,50,100])
#Code starts here
fig, axes = plt.subplots(3,2)
for i in range(len(sample_size)):
m=[]
for j in range(1000):
m.append(data.sample(n=sample_size[i]).installment.mean())
mean_series = pd.Series(m)
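# One possible way to finish the loop body (hedged sketch): plot a histogram of the
# sample means for this sample size in the first column of the 3x2 grid created above.
axes[i, 0].hist(mean_series, bins=20)
axes[i, 0].set_title('Sample size = {}'.format(sample_size[i]))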
import random
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
DataFrame,
NaT,
Timestamp,
date_range,
)
import pandas._testing as tm
class TestDataFrameSortValues:
def test_sort_values(self):
frame = DataFrame(
[[1, 1, 2], [3, 1, 0], [4, 5, 6]], index=[1, 2, 3], columns=list("ABC")
)
# by column (axis=0)
sorted_df = frame.sort_values(by="A")
indexer = frame["A"].argsort().values
expected = frame.loc[frame.index[indexer]]
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by="A", ascending=False)
indexer = indexer[::-1]
expected = frame.loc[frame.index[indexer]]
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by="A", ascending=False)
tm.assert_frame_equal(sorted_df, expected)
# GH4839
sorted_df = frame.sort_values(by=["A"], ascending=[False])
tm.assert_frame_equal(sorted_df, expected)
# multiple bys
sorted_df = frame.sort_values(by=["B", "C"])
expected = frame.loc[[2, 1, 3]]
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=["B", "C"], ascending=False)
tm.assert_frame_equal(sorted_df, expected[::-1])
sorted_df = frame.sort_values(by=["B", "A"], ascending=[True, False])
tm.assert_frame_equal(sorted_df, expected)
msg = "No axis named 2 for object type DataFrame"
with pytest.raises(ValueError, match=msg):
frame.sort_values(by=["A", "B"], axis=2, inplace=True)
# by row (axis=1): GH#10806
sorted_df = frame.sort_values(by=3, axis=1)
expected = frame
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=3, axis=1, ascending=False)
expected = frame.reindex(columns=["C", "B", "A"])
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=[1, 2], axis="columns")
expected = frame.reindex(columns=["B", "A", "C"])
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=[1, 3], axis=1, ascending=[True, False])
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=[1, 3], axis=1, ascending=False)
expected = frame.reindex(columns=["C", "B", "A"])
tm.assert_frame_equal(sorted_df, expected)
msg = r"Length of ascending \(5\) != length of by \(2\)"
with pytest.raises(ValueError, match=msg):
frame.sort_values(by=["A", "B"], axis=0, ascending=[True] * 5)
def test_sort_values_by_empty_list(self):
# https://github.com/pandas-dev/pandas/issues/40258
expected = DataFrame({"a": [1, 4, 2, 5, 3, 6]})
result = expected.sort_values(by=[])
tm.assert_frame_equal(result, expected)
assert result is not expected
def test_sort_values_inplace(self):
frame = DataFrame(
np.random.randn(4, 4), index=[1, 2, 3, 4], columns=["A", "B", "C", "D"]
)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(by="A", inplace=True)
assert return_value is None
expected = frame.sort_values(by="A")
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(by=1, axis=1, inplace=True)
assert return_value is None
expected = frame.sort_values(by=1, axis=1)
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(by="A", ascending=False, inplace=True)
assert return_value is None
expected = frame.sort_values(by="A", ascending=False)
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(
by=["A", "B"], ascending=False, inplace=True
)
assert return_value is None
expected = frame.sort_values(by=["A", "B"], ascending=False)
tm.assert_frame_equal(sorted_df, expected)
def test_sort_values_multicolumn(self):
A = np.arange(5).repeat(20)
B = np.tile(np.arange(5), 20)
random.shuffle(A)
random.shuffle(B)
frame = DataFrame({"A": A, "B": B, "C": np.random.randn(100)})
result = frame.sort_values(by=["A", "B"])
indexer = np.lexsort((frame["B"], frame["A"]))
expected = frame.take(indexer)
tm.assert_frame_equal(result, expected)
result = frame.sort_values(by=["A", "B"], ascending=False)
indexer = np.lexsort(
(frame["B"].rank(ascending=False), frame["A"].rank(ascending=False))
)
expected = frame.take(indexer)
tm.assert_frame_equal(result, expected)
result = frame.sort_values(by=["B", "A"])
indexer = np.lexsort((frame["A"], frame["B"]))
expected = frame.take(indexer)
tm.assert_frame_equal(result, expected)
def test_sort_values_multicolumn_uint64(self):
# GH#9918
# uint64 multicolumn sort
df = DataFrame(
{
"a": pd.Series([18446637057563306014, 1162265347240853609]),
"b": pd.Series([1, 2]),
}
)
df["a"] = df["a"].astype(np.uint64)
result = df.sort_values(["a", "b"])
expected = DataFrame(
{
"a": pd.Series([18446637057563306014, 1162265347240853609]),
"b": pd.Series([1, 2]),
},
index=pd.Index([1, 0]),
)
tm.assert_frame_equal(result, expected)
def test_sort_values_nan(self):
# GH#3917
df = DataFrame(
{"A": [1, 2, np.nan, 1, 6, 8, 4], "B": [9, np.nan, 5, 2, 5, 4, 5]}
)
# sort one column only
expected = DataFrame(
{"A": [np.nan, 1, 1, 2, 4, 6, 8], "B": [5, 9, 2, np.nan, 5, 5, 4]},
index=[2, 0, 3, 1, 6, 4, 5],
)
sorted_df = df.sort_values(["A"], na_position="first")
tm.assert_frame_equal(sorted_df, expected)
expected = DataFrame(
{"A": [np.nan, 8, 6, 4, 2, 1, 1], "B": [5, 4, 5, 5, np.nan, 9, 2]},
index=[2, 5, 4, 6, 1, 0, 3],
)
sorted_df = df.sort_values(["A"], na_position="first", ascending=False)
tm.assert_frame_equal(sorted_df, expected)
expected = df.reindex(columns=["B", "A"])
sorted_df = df.sort_values(by=1, axis=1, na_position="first")
tm.assert_frame_equal(sorted_df, expected)
# na_position='last', order
expected = DataFrame(
{"A": [1, 1, 2, 4, 6, 8, np.nan], "B": [2, 9, np.nan, 5, 5, 4, 5]},
index=[3, 0, 1, 6, 4, 5, 2],
)
sorted_df = df.sort_values(["A", "B"])
tm.assert_frame_equal(sorted_df, expected)
# na_position='first', order
expected = DataFrame(
{"A": [np.nan, 1, 1, 2, 4, 6, 8], "B": [5, 2, 9, np.nan, 5, 5, 4]},
index=[2, 3, 0, 1, 6, 4, 5],
)
sorted_df = df.sort_values(["A", "B"], na_position="first")
tm.assert_frame_equal(sorted_df, expected)
# na_position='first', not order
expected = DataFrame(
{"A": [np.nan, 1, 1, 2, 4, 6, 8], "B": [5, 9, 2, np.nan, 5, 5, 4]},
index=[2, 0, 3, 1, 6, 4, 5],
)
sorted_df = df.sort_values(["A", "B"], ascending=[1, 0], na_position="first")
tm.assert_frame_equal(sorted_df, expected)
# na_position='last', not order
expected = DataFrame(
{"A": [8, 6, 4, 2, 1, 1, np.nan], "B": [4, 5, 5, np.nan, 2, 9, 5]},
index=[5, 4, 6, 1, 3, 0, 2],
)
sorted_df = df.sort_values(["A", "B"], ascending=[0, 1], na_position="last")
tm.assert_frame_equal(sorted_df, expected)
def test_sort_values_stable_descending_sort(self):
# GH#6399
df = DataFrame(
[[2, "first"], [2, "second"], [1, "a"], [1, "b"]],
columns=["sort_col", "order"],
)
sorted_df = df.sort_values(by="sort_col", kind="mergesort", ascending=False)
tm.assert_frame_equal(df, sorted_df)
@pytest.mark.parametrize(
"expected_idx_non_na, ascending",
[
[
[3, 4, 5, 0, 1, 8, 6, 9, 7, 10, 13, 14],
[True, True],
],
[
[0, 3, 4, 5, 1, 8, 6, 7, 10, 13, 14, 9],
[True, False],
],
[
[9, 7, 10, 13, 14, 6, 8, 1, 3, 4, 5, 0],
[False, True],
],
[
[7, 10, 13, 14, 9, 6, 8, 1, 0, 3, 4, 5],
[False, False],
],
],
)
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_sort_values_stable_multicolumn_sort(
self, expected_idx_non_na, ascending, na_position
):
# GH#38426 Clarify sort_values with mult. columns / labels is stable
df = DataFrame(
{
"A": [1, 2, np.nan, 1, 1, 1, 6, 8, 4, 8, 8, np.nan, np.nan, 8, 8],
"B": [9, np.nan, 5, 2, 2, 2, 5, 4, 5, 3, 4, np.nan, np.nan, 4, 4],
}
)
# All rows with NaN in col "B" only have unique values in "A", therefore,
# only the rows with NaNs in "A" have to be treated individually:
expected_idx = (
[11, 12, 2] + expected_idx_non_na
if na_position == "first"
else expected_idx_non_na + [2, 11, 12]
)
expected = df.take(expected_idx)
sorted_df = df.sort_values(
["A", "B"], ascending=ascending, na_position=na_position
)
tm.assert_frame_equal(sorted_df, expected)
def test_sort_values_stable_categorial(self):
# GH#16793
df = DataFrame({"x": Categorical(np.repeat([1, 2, 3, 4], 5), ordered=True)})
expected = df.copy()
sorted_df = df.sort_values("x", kind="mergesort")
tm.assert_frame_equal(sorted_df, expected)
def test_sort_values_datetimes(self):
# GH#3461, argsort / lexsort differences for a datetime column
df = DataFrame(
["a", "a", "a", "b", "c", "d", "e", "f", "g"],
columns=["A"],
index=date_range("20130101", periods=9),
)
dts = [
Timestamp(x)
for x in [
"2004-02-11",
"2004-01-21",
"2004-01-26",
"2005-09-20",
"2010-10-04",
"2009-05-12",
"2008-11-12",
"2010-09-28",
"2010-09-28",
]
]
df["B"] = dts[::2] + dts[1::2]
df["C"] = 2.0
df["A1"] = 3.0
df1 = df.sort_values(by="A")
df2 = df.sort_values(by=["A"])
tm.assert_frame_equal(df1, df2)
df1 = df.sort_values(by="B")
df2 = df.sort_values(by=["B"])
tm.assert_frame_equal(df1, df2)
df1 = df.sort_values(by="B")
df2 = df.sort_values(by=["C", "B"])
tm.assert_frame_equal(df1, df2)
def test_sort_values_frame_column_inplace_sort_exception(self, float_frame):
s = float_frame["A"]
with pytest.raises(ValueError, match="This Series is a view"):
s.sort_values(inplace=True)
cp = s.copy()
cp.sort_values() # it works!
def test_sort_values_nat_values_in_int_column(self):
# GH#14922: "sorting with large float and multiple columns incorrect"
# cause was that the int64 value NaT was considered as "na". Which is
# only correct for datetime64 columns.
int_values = (2, int(NaT.value))
float_values = (2.0, -1.797693e308)
df = DataFrame(
{"int": int_values, "float": float_values}, columns=["int", "float"]
)
df_reversed = DataFrame(
{"int": int_values[::-1], "float": float_values[::-1]},
columns=["int", "float"],
index=[1, 0],
)
# NaT is not a "na" for int64 columns, so na_position must not
# influence the result:
df_sorted = df.sort_values(["int", "float"], na_position="last")
tm.assert_frame_equal(df_sorted, df_reversed)
df_sorted = df.sort_values(["int", "float"], na_position="first")
tm.assert_frame_equal(df_sorted, df_reversed)
# reverse sorting order
df_sorted = df.sort_values(["int", "float"], ascending=False)
tm.assert_frame_equal(df_sorted, df)
# and now check if NaT is still considered as "na" for datetime64
# columns:
df = DataFrame(
{"datetime": [Timestamp("2016-01-01"), NaT], "float": float_values},
columns=["datetime", "float"],
)
df_reversed = DataFrame(
{"datetime": [NaT, Timestamp("2016-01-01")], "float": float_values[::-1]},
columns=["datetime", "float"],
index=[1, 0],
)
df_sorted = df.sort_values(["datetime", "float"], na_position="first")
tm.assert_frame_equal(df_sorted, df_reversed)
df_sorted = df.sort_values(["datetime", "float"], na_position="last")
tm.assert_frame_equal(df_sorted, df)
# Ascending should not affect the results.
df_sorted = df.sort_values(["datetime", "float"], ascending=False)
tm.assert_frame_equal(df_sorted, df)
def test_sort_nat(self):
# GH 16836
d1 = [Timestamp(x) for x in ["2016-01-01", "2015-01-01", np.nan, "2016-01-01"]]
d2 = [
Timestamp(x)
for x in ["2017-01-01", "2014-01-01", "2016-01-01", "2015-01-01"]
]
df = DataFrame({"a": d1, "b": d2}, index=[0, 1, 2, 3])
d3 = [Timestamp(x) for x in ["2015-01-01", "2016-01-01", "2016-01-01", np.nan]]
d4 = [
Timestamp(x)
for x in ["2014-01-01", "2015-01-01", "2017-01-01", "2016-01-01"]
]
expected = DataFrame({"a": d3, "b": d4}, index=[1, 3, 0, 2])
sorted_df = df.sort_values(by=["a", "b"])
tm.assert_frame_equal(sorted_df, expected)
def test_sort_values_na_position_with_categories(self):
# GH#22556
# Positioning missing value properly when column is Categorical.
categories = ["A", "B", "C"]
category_indices = [0, 2, 4]
list_of_nans = [np.nan, np.nan]
na_indices = [1, 3]
na_position_first = "first"
na_position_last = "last"
column_name = "c"
reversed_categories = sorted(categories, reverse=True)
reversed_category_indices = sorted(category_indices, reverse=True)
reversed_na_indices = sorted(na_indices)
df = DataFrame(
{
column_name: Categorical(
["A", np.nan, "B", np.nan, "C"], categories=categories, ordered=True
)
}
)
# sort ascending with na first
result = df.sort_values(
by=column_name, ascending=True, na_position=na_position_first
)
expected = DataFrame(
{
column_name: Categorical(
list_of_nans + categories, categories=categories, ordered=True
)
},
index=na_indices + category_indices,
)
tm.assert_frame_equal(result, expected)
# sort ascending with na last
result = df.sort_values(
by=column_name, ascending=True, na_position=na_position_last
)
expected = DataFrame(
{
column_name: Categorical(
categories + list_of_nans, categories=categories, ordered=True
)
},
index=category_indices + na_indices,
)
tm.assert_frame_equal(result, expected)
# sort descending with na first
result = df.sort_values(
by=column_name, ascending=False, na_position=na_position_first
)
expected = DataFrame(
{
column_name: Categorical(
list_of_nans + reversed_categories,
categories=categories,
ordered=True,
)
},
index=reversed_na_indices + reversed_category_indices,
)
tm.assert_frame_equal(result, expected)
# sort descending with na last
result = df.sort_values(
by=column_name, ascending=False, na_position=na_position_last
)
expected = DataFrame(
{
column_name: Categorical(
reversed_categories + list_of_nans,
categories=categories,
ordered=True,
)
},
index=reversed_category_indices + reversed_na_indices,
)
tm.assert_frame_equal(result, expected)
def test_sort_values_nat(self):
# GH#16836
d1 = [Timestamp(x) for x in ["2016-01-01", "2015-01-01", np.nan, "2016-01-01"]]
d2 = [
Timestamp(x)
for x in ["2017-01-01", "2014-01-01", "2016-01-01", "2015-01-01"]
]
df = DataFrame({"a": d1, "b": d2}, index=[0, 1, 2, 3])
d3 = [Timestamp(x) for x in ["2015-01-01", "2016-01-01", "2016-01-01", np.nan]]
d4 = [
Timestamp(x)
for x in ["2014-01-01", "2015-01-01", "2017-01-01", "2016-01-01"]
]
expected = DataFrame({"a": d3, "b": d4}, index=[1, 3, 0, 2])
sorted_df = df.sort_values(by=["a", "b"])
tm.assert_frame_equal(sorted_df, expected)
def test_sort_values_na_position_with_categories_raises(self):
df = DataFrame(
{
"c": Categorical(
["A", np.nan, "B", np.nan, "C"],
categories=["A", "B", "C"],
ordered=True,
)
}
)
with pytest.raises(ValueError, match="invalid na_position: bad_position"):
df.sort_values(by="c", ascending=False, na_position="bad_position")
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize(
"original_dict, sorted_dict, ignore_index, output_index",
[
({"A": [1, 2, 3]}, {"A": [3, 2, 1]}, True, [0, 1, 2]),
({"A": [1, 2, 3]}, {"A": [3, 2, 1]}, False, [2, 1, 0]),
(
{"A": [1, 2, 3], "B": [2, 3, 4]},
{"A": [3, 2, 1], "B": [4, 3, 2]},
True,
[0, 1, 2],
),
(
{"A": [1, 2, 3], "B": [2, 3, 4]},
{"A": [3, 2, 1], "B": [4, 3, 2]},
False,
[2, 1, 0],
),
],
)
def test_sort_values_ignore_index(
self, inplace, original_dict, sorted_dict, ignore_index, output_index
):
# GH 30114
df = DataFrame(original_dict)
expected = DataFrame(sorted_dict, index=output_index)
kwargs = {"ignore_index": ignore_index, "inplace": inplace}
if inplace:
result_df = df.copy()
result_df.sort_values("A", ascending=False, **kwargs)
else:
result_df = df.sort_values("A", ascending=False, **kwargs)
tm.assert_frame_equal(result_df, expected)
tm.assert_frame_equal(df, DataFrame(original_dict))
def test_sort_values_nat_na_position_default(self):
# GH 13230
expected = DataFrame(
{
"A": [1, 2, 3, 4, 4],
"date": pd.DatetimeIndex(
[
"2010-01-01 09:00:00",
"2010-01-01 09:00:01",
"2010-01-01 09:00:02",
"2010-01-01 09:00:03",
"NaT",
]
),
}
)
result = expected.sort_values(["A", "date"])
tm.assert_frame_equal(result, expected)
def test_sort_values_item_cache(self, using_array_manager):
# previous behavior incorrect retained an invalid _item_cache entry
df = DataFrame(np.random.randn(4, 3), columns=["A", "B", "C"])
df["D"] = df["A"] * 2
ser = df["A"]
if not using_array_manager:
assert len(df._mgr.blocks) == 2
df.sort_values(by="A")
ser.values[0] = 99
assert df.iloc[0, 0] == df["A"][0]
def test_sort_values_reshaping(self):
# GH 39426
values = list(range(21))
expected = DataFrame([values], columns=values)
df = expected.sort_values(expected.index[0], axis=1, ignore_index=True)
tm.assert_frame_equal(df, expected)
class TestDataFrameSortKey: # test key sorting (issue 27237)
def test_sort_values_inplace_key(self, sort_by_key):
frame = DataFrame(
np.random.randn(4, 4), index=[1, 2, 3, 4], columns=["A", "B", "C", "D"]
)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(by="A", inplace=True, key=sort_by_key)
assert return_value is None
expected = frame.sort_values(by="A", key=sort_by_key)
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(
by=1, axis=1, inplace=True, key=sort_by_key
)
assert return_value is None
expected = frame.sort_values(by=1, axis=1, key=sort_by_key)
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(
by="A", ascending=False, inplace=True, key=sort_by_key
)
assert return_value is None
expected = frame.sort_values(by="A", ascending=False, key=sort_by_key)
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
sorted_df.sort_values(
by=["A", "B"], ascending=False, inplace=True, key=sort_by_key
)
expected = frame.sort_values(by=["A", "B"], ascending=False, key=sort_by_key)
tm.assert_frame_equal(sorted_df, expected)
def test_sort_values_key(self):
df = DataFrame(np.array([0, 5, np.nan, 3, 2, np.nan]))
result = df.sort_values(0)
expected = df.iloc[[0, 4, 3, 1, 2, 5]]
tm.assert_frame_equal(result, expected)
result = df.sort_values(0, key=lambda x: x + 5)
expected = df.iloc[[0, 4, 3, 1, 2, 5]]
tm.assert_frame_equal(result, expected)
result = df.sort_values(0, key=lambda x: -x, ascending=False)
expected = df.iloc[[0, 4, 3, 1, 2, 5]]
tm.assert_frame_equal(result, expected)
def test_sort_values_by_key(self):
df = DataFrame(
{
"a": np.array([0, 3, np.nan, 3, 2, np.nan]),
"b": np.array([0, 2, np.nan, 5, 2, np.nan]),
}
)
result = df.sort_values("a", key=lambda x: -x)
expected = df.iloc[[1, 3, 4, 0, 2, 5]]
tm.assert_frame_equal(result, expected)
result = df.sort_values(by=["a", "b"], key=lambda x: -x)
expected = df.iloc[[3, 1, 4, 0, 2, 5]]
tm.assert_frame_equal(result, expected)
result = df.sort_values(by=["a", "b"], key=lambda x: -x, ascending=False)
expected = df.iloc[[0, 4, 1, 3, 2, 5]]
tm.assert_frame_equal(result, expected)
def test_sort_values_by_key_by_name(self):
df = DataFrame(
{
"a": np.array([0, 3, np.nan, 3, 2, np.nan]),
"b": np.array([0, 2, np.nan, 5, 2, np.nan]),
}
)
def key(col):
if col.name == "a":
return -col
else:
return col
result = df.sort_values(by="a", key=key)
expected = df.iloc[[1, 3, 4, 0, 2, 5]]
tm.assert_frame_equal(result, expected)
result = df.sort_values(by=["a"], key=key)
expected = df.iloc[[1, 3, 4, 0, 2, 5]]
tm.assert_frame_equal(result, expected)
result = df.sort_values(by="b", key=key)
expected = df.iloc[[0, 1, 4, 3, 2, 5]]
tm.assert_frame_equal(result, expected)
result = df.sort_values(by=["a", "b"], key=key)
expected = df.iloc[[1, 3, 4, 0, 2, 5]]
tm.assert_frame_equal(result, expected)
def test_sort_values_key_string(self):
df = DataFrame(np.array([["hello", "goodbye"], ["hello", "Hello"]]))
result = df.sort_values(1)
expected = df[::-1]
tm.assert_frame_equal(result, expected)
result = df.sort_values([0, 1], key=lambda col: col.str.lower())
tm.assert_frame_equal(result, df)
result = df.sort_values(
[0, 1], key=lambda col: col.str.lower(), ascending=False
)
expected = df.sort_values(1, key=lambda col: col.str.lower(), ascending=False)
tm.assert_frame_equal(result, expected)
def test_sort_values_key_empty(self, sort_by_key):
df = DataFrame(np.array([]))
df.sort_values(0, key=sort_by_key)
df.sort_index(key=sort_by_key)
def test_changes_length_raises(self):
df = DataFrame({"A": [1, 2, 3]})
with pytest.raises(ValueError, match="change the shape"):
df.sort_values("A", key=lambda x: x[:1])
def test_sort_values_key_axes(self):
df = DataFrame({0: ["Hello", "goodbye"], 1: [0, 1]})
result = df.sort_values(0, key=lambda col: col.str.lower())
expected = df[::-1]
tm.assert_frame_equal(result, expected)
result = df.sort_values(1, key=lambda col: -col)
expected = df[::-1]
tm.assert_frame_equal(result, expected)
def test_sort_values_key_dict_axis(self):
df = DataFrame({0: ["Hello", 0], 1: ["goodbye", 1]})
result = df.sort_values(0, key=lambda col: col.str.lower(), axis=1)
expected = df.loc[:, ::-1]
tm.assert_frame_equal(result, expected)
result = df.sort_values(1, key=lambda col: -col, axis=1)
expected = df.loc[:, ::-1]
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("ordered", [True, False])
def test_sort_values_key_casts_to_categorical(self, ordered):
# https://github.com/pandas-dev/pandas/issues/36383
categories = ["c", "b", "a"]
df = DataFrame({"x": [1, 1, 1], "y": ["a", "b", "c"]})
def sorter(key):
if key.name == "y":
return pd.Series(
Categorical(key, categories=categories, ordered=ordered)
)
return key
result = df.sort_values(by=["x", "y"], key=sorter)
expected = DataFrame(
{"x": [1, 1, 1], "y": ["c", "b", "a"]}, index=pd.Index([2, 1, 0])
)
tm.assert_frame_equal(result, expected)
@pytest.fixture
def df_none():
return DataFrame(
{
"outer": ["a", "a", "a", "b", "b", "b"],
"inner": [1, 2, 2, 2, 1, 1],
"A": np.arange(6, 0, -1),
("B", 5): ["one", "one", "two", "two", "one", "one"],
}
)
@pytest.fixture(params=[["outer"], ["outer", "inner"]])
def df_idx(request, df_none):
levels = request.param
return df_none.set_index(levels)
@pytest.fixture(
params=[
"inner", # index level
["outer"], # list of index level
"A", # column
[("B", 5)], # list of column
["inner", "outer"], # two index levels
[("B", 5), "outer"], # index level and column
["A", ("B", 5)], # Two columns
["inner", "outer"], # two index levels and column
]
)
def sort_names(request):
return request.param
@pytest.fixture(params=[True, False])
def ascending(request):
return request.param
class TestSortValuesLevelAsStr:
def test_sort_index_level_and_column_label(
self, df_none, df_idx, sort_names, ascending
):
# GH#14353
# Get index levels from df_idx
levels = df_idx.index.names
# Compute expected by sorting on columns and the setting index
expected = df_none.sort_values(
by=sort_names, ascending=ascending, axis=0
).set_index(levels)
# Compute result sorting on mix on columns and index levels
result = df_idx.sort_values(by=sort_names, ascending=ascending, axis=0)
tm.assert_frame_equal(result, expected)
def test_sort_column_level_and_index_label(
self, df_none, df_idx, sort_names, ascending
):
# GH#14353
# Get levels from df_idx
levels = df_idx.index.names
# Compute expected by sorting on axis=0, setting index levels, and then
# transposing. For some cases this will result in a frame with
# multiple column levels
expected = (
df_none.sort_values(by=sort_names, ascending=ascending, axis=0)
.set_index(levels)
.T
)
# Compute result by transposing and sorting on axis=1.
result = df_idx.T.sort_values(by=sort_names, ascending=ascending, axis=1)
tm.assert_frame_equal(result, expected)
def test_sort_values_pos_args_deprecation(self):
# https://github.com/pandas-dev/pandas/issues/41485
df = DataFrame({"a": [1, 2, 3]})
msg = (
r"In a future version of pandas all arguments of DataFrame\.sort_values "
r"except for the argument 'by' will be keyword-only"
)
with tm.assert_produces_warning(FutureWarning, match=msg):
result = df.sort_values("a", 0)
expected = DataFrame({"a": [1, 2, 3]})
tm.assert_frame_equal(result, expected)
"""
Load the VQA questions JSON and attach image captions
"""
import json
import pandas as pd
path = './datasets/vqa/v2_OpenEnded_mscoco_train2014_questions.json'
with open(path) as question:
question = json.load(question)
# question['questions'][0]
# question['questions'][1]
# question['questions'][2]
df = pd.DataFrame(question['questions'])
df
caption_path = './datasets/caption/vis_st_trainval.json'
with open(caption_path) as cap:
cap = json.load(cap)
df_cap = pd.DataFrame(cap)
df_cap
df_addcap = pd.merge(df, df_cap, how='left', on='image_id')
del df_addcap['file_path']
########################################################################################################################
"""
pandas to json
"""
df_addcap.to_json('./datasets/caption/train_cap2.json', orient='table')
with open('./datasets/caption/train_cap2.json') as train_cap:
train_cap = json.load(train_cap)
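# Note: json.load() above returns the raw {'schema': ..., 'data': ...} dict that
# orient='table' produces. To get the DataFrame itself back, pandas can round-trip
# the same file directly -- a minimal sketch re-reading the file written above:
train_cap_df = pd.read_json('./datasets/caption/train_cap2.json', orient='table')
train_cap_df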
########################################################################################################################
########################################################################################################################
"""
answer + cap
"""
path = '/home/nextgen/Desktop/mcan-vqa/datasets/vqa/v2_mscoco_train2014_annotations.json'
path = './datasets/vqa/v2_mscoco_val2014_annotations.json'
with open(path) as answer:
answer = json.load(answer)
answer['annotations'][0]
df_ans = pd.DataFrame(answer['annotations'])
df_ans[:0]
del df_ans['question_type']
del df_ans['answers']
del df_ans['answer_type']
del df_ans['image_id']
df_ans[df_ans['question_id']==458752000]
df_addcap2 = pd.merge(df_addcap, df_ans, how='left', on='question_id')
df_addcap2[:0]
df_addcap2['multiple_choice_answer']
# del df_addcap['file_path']
df_addcap2.to_json('./datasets/caption/val_qacap.json', orient='table')
with open('./datasets/caption/train_qacap.json') as train_qacap:
train_qacap = json.load(train_qacap)
########################################################################################################################
"""val test도 마찬가지"""
path = './datasets/vqa/v2_OpenEnded_mscoco_val2014_questions.json'
with open(path) as question:
question = json.load(question)
df = pd.DataFrame(question['questions'])
df
caption_path = './datasets/caption/vis_st_trainval.json'
with open(caption_path) as cap:
cap = json.load(cap)
df_cap = pd.DataFrame(cap)
####################################################################################################
"""
dashboard.py
This script implements a dashboard-application for the efficient planning of the municipal
enforcement process, based on housing fraud signals, within the municipality of Amsterdam.
<NAME> & <NAME> 2019
Basic intro on working with Dash: https://dash.plot.ly/getting-started
Example dashboards using maps in Dash (from dash-gallery.plotly.host/Portal):
github.com/plotly/dash-sample-apps/blob/master/apps/dash-oil-and-gas/app.py
github.com/plotly/dash-oil-gas-ternary
This dashboard took some inspiration from this video:
https://www.youtube.com/watch?v=lu0PtsMor4E
Inspiration has also been taken from the corresponding codebase:
https://github.com/amyoshino/Dash_Tutorial_Series (careful: this repo seems to be full of errors!!)
"""
####################################################################################################
#############
## Imports ##
#############
import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_table as dt
import dash_table.FormatTemplate as FormatTemplate
from dash.dependencies import Input, Output, State, ClientsideFunction
import pandas as pd
import urllib
import json
import sys
import os
import re
import q
from copy import deepcopy
import plotly.graph_objs as go
# Add the parent paths to sys.path, so our own modules on the root dir can also be imported.
SCRIPT_PATH = os.path.abspath(__file__)
SCRIPT_DIR = os.path.dirname(SCRIPT_PATH)
PARENT_PATH = os.path.join(SCRIPT_DIR, os.path.pardir)
sys.path.append(PARENT_PATH)
# Import own modules.
import config
import dashboard_helper
#################################
## Load server or mock-up data ##
#################################
# Try to create a list of 100 meldingen from the data.
try:
df = dashboard_helper.process_recent_signals()
print('Successfully created prediction for recent signals.')
except Exception:
df = pd.read_csv(os.path.join(SCRIPT_DIR, 'mockup_dataset.csv'), sep=';', skipinitialspace=True)
print('Cannot generate predictions from the data. Falling back to using the mockup_dataset.csv')
df_proactief = pd.read_csv(os.path.join(SCRIPT_DIR, 'mockup_dataset_proactief.csv'), sep=';', skipinitialspace=True)
df_unsupervised = pd.read_csv(os.path.join(SCRIPT_DIR, 'mockup_dataset_unsupervised.csv'), sep=';', skipinitialspace=True)
#########################
## Define site visuals ##
#########################
colors = {'paper': '#DDDDDD',
'background': '#F2F2F2',
'container_background': '#F9F9F9',
'text': '#1E4363',
'marker': '#1E4363',
'fraud': 'rgb(200, 50, 50)',
'no_fraud': 'rgb(150, 150, 150)',
'selected': 'rgb(75, 75, 75)',
}
###############################
## Set some global variables ##
###############################
# Get dictionary of columns for DataTable.
SELECTED_COLUMNS = ['fraude_kans', 'woonfraude', 'adres_id', 'sdl_naam', 'categorie', 'eigenaar']
TABLE_COLUMNS = [{'name': i, 'id': i} for i in SELECTED_COLUMNS]
# Define styling for the first column (fraude_kans), to reduce the decimals after comma.
TABLE_COLUMNS[0]['name'] = 'Fraude kans (%)'
TABLE_COLUMNS[0]['type'] = 'numeric'
TABLE_COLUMNS[0]['format'] = FormatTemplate.percentage(2)
##########################
## Define the dashboard ##
##########################
# external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
# app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
app = dash.Dash(__name__)
server = app.server
app.title = 'Woonfraude Dashboard'
# Defines the meldingen tab.
meldingen_tab = html.Div(
[
# Div containing a selection of the data based on dropdown selection.
html.Div(id='intermediate_value', style={'display': 'none'}),
# Divs contain a lists of points which have been selected with on-clicks on the map.
html.Div(id='point_selection', style={'display': 'none'}),
html.Div(id='filtered_point_selection', style={'display': 'none'}),
# Row containing filters, info boxes, and map.
html.Div(
[
# Filters div.
html.Div(
[
# Create drop down filter for categories.
html.P('Selecteer categorieën:', className="control_label"),
dcc.Dropdown(
id='categorie_dropdown',
placeholder='Selecteer categorieën',
options=[{'label': x, 'value': x} for x in sorted(df.categorie.unique())],
multi=True,
value=df.categorie.unique(),
),
# Create drop down filter for city parts.
html.P('Selecteer stadsdelen:', className="control_label"),
dcc.Dropdown(
id='stadsdeel_dropdown',
placeholder='Selecteer stadsdelen',
options=[{'label': x, 'value': x} for x in sorted(df.sdl_naam.unique())],
multi=True,
value=sorted(df.sdl_naam.unique()),
),
# Show info of items selected on map (using click).
html.Div(
[
html.P('Geselecteerde adressen:', className="control_label"),
dt.DataTable(
id='filtered_point_selection_table',
columns = TABLE_COLUMNS[1:-1],
sort_action='native',
sort_by=[{'column_id': 'fraude_kans', 'direction': 'desc'}],
page_action='native',
page_current=0,
page_size=20,
style_data_conditional=[
{
'if': {
'column_id': 'woonfraude',
'filter_query': '{woonfraude} eq True'
},
'backgroundColor': colors['fraud'],
},
{
'if': {
'column_id': 'woonfraude',
'filter_query': '{woonfraude} eq False'
},
'backgroundColor': colors['no_fraud'],
},
{
'if': {
'column_id': 'fraude_kans',
'filter_query': '{fraude_kans} ge 0.5'
},
'backgroundColor': colors['fraud'],
},
{
'if': {
'column_id': 'fraude_kans',
'filter_query': '{fraude_kans} lt 0.5'
},
'backgroundColor': colors['no_fraud'],
},
]
),
],
),
# Link to download csv with all selected addresses.
html.A(
'Download lijst geselecteerde adressen (CSV)',
id='download_selected_addresses_list',
download="geselecteerde_adressen.csv",
href="",
target="_blank",
),
# Button test.
html.P(''),
html.Button('Test', id='button'),
html.P('', id='button_n_clicks')
],
id='leftCol',
className="pretty_container four columns",
),
# Widgets and map div.
html.Div(
[
# # Row with 4 statistics widgets
# html.Div(
# [
# # Aantal meldingen (info box).
# html.Div(
# [
# html.P("Aantal meldingen"),
# html.H6(
# id="aantal_meldingen",
# className="info_text"
# )
# ],
# className="pretty_container"
# ),
# # Percentage fraude verwacht (info box).
# html.Div(
# [
# html.P("% Fraude verwacht"),
# html.H6(
# id="percentage_fraude_verwacht",
# className="info_text"
# )
# ],
# className="pretty_container"
# ),
# # Aantal geselecteerde meldingen (info box).
# html.Div(
# [
# html.P("Aantal geselecteerde meldingen"),
# html.H6(
# id="aantal_geselecteerde_meldingen",
# className="info_text"
# )
# ],
# className="pretty_container",
# style={'backgroundColor': '#F7D7D7'}
# ),
# # Percentage fraude verwacht bij geselecteerde meldingen (info box).
# html.Div(
# [
# html.P("% Fraude verwacht bij geselecteerde meldingen"),
# html.H6(
# id="percentage_fraude_verwacht_geselecteerd",
# className="info_text"
# )
# ],
# className="pretty_container",
# style={'backgroundColor': '#F7D7D7'}
# ),
# ],
# id="infoContainer",
# className="row"
# ),
# Map with selectable points.
html.Div(
dcc.Graph(
id='map',
config={'displayModeBar': False}, # Turned off to disable selection with box/lasso etc.
),
className="pretty_container",
# style={'height': 500}
),
],
id="rightCol",
className="eight columns"
),
],
className="row",
),
# Data table div.
html.Div(
[
# Filtered entries data table.
html.Div(
[
html.P('Gefilterde meldingen'),
dt.DataTable(
id='filtered_table',
columns = TABLE_COLUMNS,
sort_action='native',
sort_by=[{'column_id': 'fraude_kans', 'direction': 'desc'}],
# filter_action='native', # Maybe turn off? A text field to filter feels clunky..
# row_selectable='multi',
# selected_rows=[],
page_action='native',
page_current=0,
page_size=20,
style_data_conditional=[
{
'if': {
'column_id': 'woonfraude',
'filter_query': '{woonfraude} eq True'
},
'backgroundColor': colors['fraud'],
},
{
'if': {
'column_id': 'woonfraude',
'filter_query': '{woonfraude} eq False'
},
'backgroundColor': colors['no_fraud'],
},
{
'if': {
'column_id': 'fraude_kans',
'filter_query': '{fraude_kans} ge 0.5'
},
'backgroundColor': colors['fraud'],
},
{
'if': {
'column_id': 'fraude_kans',
'filter_query': '{fraude_kans} lt 0.5'
},
'backgroundColor': colors['no_fraud'],
}
]
),
],
className="pretty_container eight columns",
),
# Filtered entries stadsdeel split (pie chart).
html.Div(
[
html.P("Gefilterde meldingen - Stadsdeel split"),
dcc.Graph(
id="stadsdeel_split",
config={'displayModeBar': False},
)
],
id="stadsdeel",
className="pretty_container two columns"
),
# Filtered entries categorie split (pie chart).
html.Div(
[
html.P("Gefilterde meldingen - Categorie split"),
dcc.Graph(
id="categorie_split",
config={'displayModeBar': False},
)
],
id="categorie",
className="pretty_container two columns"
),
],
className="row"
),
],
id="mainContainer",
style={
"display": "flex",
"flex-direction": "column"
}
)
# Defines the proactief tab.
proactief_tab = html.Div(
[
# For creating a map_proactief callback function with an empty input.
html.Div(id='none_proactief',children=[],style={'display': 'none'}),
# Div for containing a selection of the data based on filters.
html.Div(id='intermediate_value_proactief', style={'display': 'none'}),
# Row containing filters, info boxes, and map.
html.Div(
[
# Filters div.
html.Div(
[
# Create range slider for number of meldingen.
html.P('Minimaal aantal meldingen op adres:', className="control_label"),
dcc.RangeSlider(
id='aantal_meldingen_rangeslider_proactief',
min=min(df_proactief.aantal_meldingen),
max=max(df_proactief.aantal_meldingen),
marks={i: f"{i}" for i in range(min(df_proactief.aantal_meldingen), max(df_proactief.aantal_meldingen)+1)},
value=[min(df_proactief.aantal_meldingen), max(df_proactief.aantal_meldingen)]
),
# Padding (temporary hack)
html.P(' '),
# Create slider for number of adults.
html.P('Aantal volwassenen', className="control_label"),
dcc.RangeSlider(
id='aantal_volwassenen_rangeslider_proactief',
min=min(df_proactief.aantal_volwassenen),
max=max(df_proactief.aantal_volwassenen),
marks={i: f"{i}" for i in range(min(df_proactief.aantal_volwassenen), max(df_proactief.aantal_volwassenen)+1)},
value=[min(df_proactief.aantal_volwassenen), max(df_proactief.aantal_volwassenen)]
),
# Padding (temporary hack)
html.P(' '),
# Create m2 per person slider.
html.P('Aantal m2 per persoon:', className="control_label"),
dcc.RangeSlider(
id='aantal_m2_per_persoon_rangeslider_proactief',
min=min(df_proactief.m2_per_persoon),
max=max(df_proactief.m2_per_persoon),
marks={i: f"{i}" for i in range(min(df_proactief.m2_per_persoon), max(df_proactief.m2_per_persoon)+1, 3)},
value=[min(df_proactief.m2_per_persoon), max(df_proactief.m2_per_persoon)]
),
# Padding (temporary hack)
html.P(' '),
# Create drop down filter for city parts.
html.P('Selecteer stadsdelen:', className="control_label"),
dcc.Dropdown(
id='stadsdeel_dropdown_proactief',
placeholder='Selecteer stadsdelen',
options=[{'label': x, 'value': x} for x in sorted(df_proactief.sdl_naam.unique())],
multi=True,
value=sorted(df_proactief.sdl_naam.unique()),
),
# Create hotline dropdown.
html.P('Is hotline melding:', className="control_label"),
dcc.Dropdown(
id='hotline_dropdown_proactief',
placeholder='Selecteer waarden',
options=[{'label': 'Ja', 'value': 'True'}, {'label': 'Nee', 'value': 'False'}],
multi=True,
value=['True', 'False']
),
# Create gebruikersdoel dropdown.
html.P('Selecteer gebruikersdoel:', className="control_label"),
dcc.Dropdown(
id='gebruikersdoel_dropdown_proactief',
placeholder='Selecteer gebruikersdoel',
options=[{'label': x, 'value': x} for x in sorted(df_proactief.gebruikersdoel.unique())],
multi=True,
value=sorted(df_proactief.gebruikersdoel.unique()),
),
# Create profiel dropdown.
html.P('Selecteer profiel:', className="control_label"),
dcc.Dropdown(
id='profiel_dropdown_proactief',
placeholder='Selecteer profiel',
options=[{'label': x, 'value': x} for x in sorted(df_proactief.profiel.unique())],
multi=True,
value=sorted(df_proactief.profiel.unique()),
),
],
id='leftCol_proactief',
className="pretty_container four columns",
),
# Map div.
html.Div(
[
# Map with selectable points.
html.Div(
dcc.Graph(
id='map_proactief',
config={'displayModeBar': False}, # Turned off to disable selection with box/lasso etc.
),
className="pretty_container",
# style={'height': 500}
),
],
id="rightCol_proactief",
className="eight columns"
),
],
className="row",
),
# Data table div.
html.Div(
[
# Filtered entries data table.
html.Div(
[
html.P('Gefilterde meldingen'),
dt.DataTable(
id='filtered_table_proactief',
columns = TABLE_COLUMNS,
sort_action='native',
sort_by=[{'column_id': 'fraude_kans', 'direction': 'desc'}],
# filter_action='native', # Maybe turn off? A text field to filter feels clunky..
# row_selectable='multi',
# selected_rows=[],
page_action='native',
page_current=0,
page_size=20,
style_data_conditional=[
{
'if': {
'column_id': 'woonfraude',
'filter_query': '{woonfraude} eq True'
},
'backgroundColor': colors['fraud'],
},
{
'if': {
'column_id': 'woonfraude',
'filter_query': '{woonfraude} eq False'
},
'backgroundColor': colors['no_fraud'],
},
{
'if': {
'column_id': 'fraude_kans',
'filter_query': '{fraude_kans} ge 0.5'
},
'backgroundColor': colors['fraud'],
},
{
'if': {
'column_id': 'fraude_kans',
'filter_query': '{fraude_kans} lt 0.5'
},
'backgroundColor': colors['no_fraud'],
}
]
),
],
className="pretty_container ten columns",
),
# Filtered entries stadsdeel split (pie chart).
html.Div(
[
html.P("Gefilterde meldingen - Stadsdeel split"),
dcc.Graph(
id="stadsdeel_split_proactief",
config={'displayModeBar': False},
)
],
id="stadsdeel_proactief",
className="pretty_container two columns"
),
],
className="row"
),
# html.Div(
# dcc.Graph(
# id='map_proactief',
# config={'displayModeBar': False}, # Turned off to disable selection with box/lasso etc.
# ),
# className="pretty_container",
# ),
],
style={
"display": "flex",
"flex-direction": "column"
}
)
# Defines the unsupervised tab.
unsupervised_tab = html.Div(
[
# For creating a map_unsupervised callback function with an empty input.
html.Div(id='none_unsupervised',children=[],style={'display': 'none'}),
# Div for containing a selection of the data based on filters.
html.Div(id='intermediate_value_unsupervised', style={'display': 'none'}),
html.Div(
dcc.Graph(
id='map_unsupervised',
config={'displayModeBar': False}, # Turned off to disable selection with box/lasso etc.
),
className="pretty_container",
),
],
style={
"display": "flex",
"flex-direction": "column"
}
)
# Combines the two tabs into a single app.
app.layout = html.Div([
# Title
html.H1("Woonfraude Dashboard", style={'textAlign': 'center'}),
# Tabs for meldingen & proactieve handhaving.
dcc.Tabs(id='tabs', value='meldingen_tab', children=[
dcc.Tab(label='Meldingen', value='meldingen_tab', children=[meldingen_tab]),
dcc.Tab(label='Proactieve handhaving', value='proactief_tab', children=[proactief_tab]),
dcc.Tab(label='Unsupervised', value='unsupervised_tab', children=[unsupervised_tab]),
])
])
# Updates the intermediate data based on the dropdown selection.
@app.callback(
Output('intermediate_value', 'children'),
[Input('categorie_dropdown', 'value'),
Input('stadsdeel_dropdown', 'value')]
)
def create_data_selection(selected_categories, selected_stadsdelen):
# Create a copy of the original dataframe.
df_filtered = deepcopy(df)
# Filter the original dataframe by selected categories.
df_filtered = df_filtered[df_filtered.categorie.isin(selected_categories)]
# Filter the dataframe by selected stadsdelen.
df_filtered = df_filtered[df_filtered.sdl_naam.isin(selected_stadsdelen)]
return df_filtered.to_json(date_format='iso', orient='split')
'''
# Updates the aantal_meldingen info box.
@app.callback(
Output('aantal_meldingen', 'children'),
[Input('intermediate_value', 'children')]
)
def count_items(intermediate_value):
# Load the pre-filtered version of the dataframe.
df = pd.read_json(intermediate_value, orient='split')
return len(df)
# Updates the percentage_fraude_verwacht info box.
@app.callback(
Output('percentage_fraude_verwacht', 'children'),
[Input('intermediate_value', 'children')]
)
def compute_fraud_percentage(intermediate_value):
# Load the pre-filtered version of the dataframe.
df = pd.read_json(intermediate_value, orient='split')
# Compute what percentage of cases is expected to be fraudulent. If/else to prevent division by 0.
if len(df.woonfraude) > 0:
fraude_percentage = len(df.woonfraude[df.woonfraude == True]) / len(df.woonfraude) * 100
else:
fraude_percentage = 0
# Return truncated value (better for printing on dashboard)
return round(fraude_percentage, 1)
# Updates the aantal_geselecteerde_meldingen info box.
@app.callback(
Output('aantal_geselecteerde_meldingen', 'children'),
[Input('filtered_point_selection', 'children')]
)
def count_items_selected(filtered_point_selection):
# Just return the amount of filtered selected points.
return len(filtered_point_selection)
# Updates the percentage_fraude_verwacht_geselecteerd info box.
@app.callback(
Output('percentage_fraude_verwacht_geselecteerd', 'children'),
[Input('intermediate_value', 'children'),
Input('filtered_point_selection', 'children')]
)
def compute_fraud_percentage_selected(intermediate_value, filtered_point_selection):
# Load the pre-filtered version of the dataframe.
df = pd.read_json(intermediate_value, orient='split')
# Reduce the dataframe using the point selection.
df = df[df.adres_id.isin(filtered_point_selection)]
# Compute what percentage of cases is expected to be fraudulent. If/else to prevent division by 0.
if len(df.woonfraude) > 0:
fraude_percentage = len(df.woonfraude[df.woonfraude == True]) / len(df.woonfraude) * 100
else:
fraude_percentage = 0
# Return truncated value (better for printing on dashboard)
return round(fraude_percentage, 1)
'''
# Updates the map based on dropdown-selections.
@app.callback(
Output('map', 'figure'),
[Input('intermediate_value', 'children'),
Input('point_selection', 'children')],
[State('map', 'figure')]
)
def plot_map(intermediate_value, point_selection, map_state):
# Define which input triggers the callback (map.figure or intermediate_value.children).
trigger_event = dash.callback_context.triggered[0]['prop_id']
# Load the pre-filtered version of the dataframe.
df_map = pd.read_json(intermediate_value, orient='split')
# Select positive and negative samples for plotting.
pos = df_map[df_map.woonfraude==True]
neg = df_map[df_map.woonfraude==False]
# Create a df of the selected points, for highlighting.
selected_point_ids = [int(x) for x in point_selection]
sel = df_map.loc[df_map.adres_id.isin(selected_point_ids)]
# Create texts for when hovering the mouse over items.
def make_hover_string(row):
return f"Adres id: {row.adres_id}\
<br>Categorie: {row.categorie}\
<br>Aantal inwoners: {row.aantal_personen}\
<br>Aantal achternamen: {row.aantal_achternamen}\
<br>Eigenaar: {row.eigenaar}"
pos_text = pos.apply(make_hover_string, axis=1)
neg_text = neg.apply(make_hover_string, axis=1)
sel_text = sel.apply(make_hover_string, axis=1)
figure={
'data': [
# Plot border for selected samples (plot first, so its behind the pos/neg samples).
go.Scattermapbox(
name='Geselecteerd',
lat=sel['wzs_lat'],
lon=sel['wzs_lon'],
text=sel_text,
mode='markers',
marker=dict(
size=17,
color=colors['selected'],
),
),
# Plot positive samples.
go.Scattermapbox(
name='Woonfraude verwacht',
lat=pos['wzs_lat'],
lon=pos['wzs_lon'],
text=pos_text,
hoverinfo='text',
mode='markers',
marker=dict(
size=12,
color=colors['fraud'],
),
),
# Plot negative samples.
go.Scattermapbox(
name='<NAME>',
lat=neg['wzs_lat'],
lon=neg['wzs_lon'],
text=neg_text,
hoverinfo='text',
mode='markers',
marker=dict(
size=12,
color=colors['no_fraud'],
),
),
],
'layout': go.Layout(
uirevision='never',
autosize=True,
hovermode='closest',
# width=1000,
height=700,
margin=go.layout.Margin(l=0, r=0, b=0, t=0, pad=0),
showlegend=False, # Set to False, since legend selection breaks custom point selection.
legend=dict(orientation='h'),
plot_bgcolor=colors['background'],
paper_bgcolor=colors['paper'],
mapbox=dict(
accesstoken=config.mapbox_access_token,
style="light",
center=dict(
lat=52.36,
lon=4.89
),
zoom=11,
),
)
}
return figure
# Updates the table showing all data points after dropdown-selections.
@app.callback(
Output('filtered_table', 'data'),
[Input('intermediate_value', 'children')]
)
def generate_filtered_table(intermediate_value):
# Load the pre-filtered version of the dataframe.
df_table = pd.read_json(intermediate_value, orient='split')
# Transform True and False boolean values to strings.
df_table.woonfraude = df_table.woonfraude.replace({True: 'True', False: 'False'})
# Only use a selection of the columns.
df_table = df_table[SELECTED_COLUMNS]
# Create a table, with all positive woonfraude examples at the top.
columns = [{"name": i, "id": i} for i in df_table.columns]
data = df_table.to_dict('records')
return data
# Enable the selection of map points using click-events.
@app.callback(
Output('point_selection', 'children'),
[Input('map', 'clickData'),
Input('intermediate_value', 'children')],
[State('point_selection', 'children')]
)
def update_point_selection_on_click(clickData, intermediate_value, existing_point_selection):
"""
Update point selection with newly selected points, or according to dropdown filters.
The input "intermediate_value:children" is only used to activate a callback.
"""
# Define which input triggers the callback (map.clickData or intermediate_value.children).
trigger_event = dash.callback_context.triggered[0]['prop_id']
# Re-use previous point selection (if it already existed).
point_selection = []
if existing_point_selection != None:
point_selection = existing_point_selection
# Add a clicked point to the selection, or remove it when it already existed in the selection.
if trigger_event == 'map.clickData':
if clickData != None:
point_id = re.match("Adres id: (\d+)", clickData['points'][0]['text']).group(1)
if point_id in point_selection:
point_selection.remove(point_id)
else:
point_selection.append(point_id)
return point_selection
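# The click handler above recovers the adres_id by parsing the marker hover text, so the
# hover-string format built in plot_map() and this regex must stay in sync. A tiny
# illustration with a made-up hover payload (the id and fields are hypothetical):
def _example_parse_click_text():
    sample_text = "Adres id: 12345<br>Categorie: ..."  # hypothetical hover text
    return re.match(r"Adres id: (\d+)", sample_text).group(1)  # -> '12345'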
# Create a filtered version of the point_selection, based on the categorie and stadsdeel filters.
@app.callback(
Output('filtered_point_selection', 'children'),
[Input('point_selection', 'children'),
Input('intermediate_value', 'children')]
)
def show_selected(existing_point_selection, intermediate_value):
# Re-use previous point selection (if it already existed).
point_selection = []
if existing_point_selection != None:
point_selection = existing_point_selection
# Filter any previously selected points, if the dropdown selections rule them out.
df = pd.read_json(intermediate_value, orient='split') # Load the pre-filtered version of the dataframe.
point_ids_list = [str(x) for x in list(df.adres_id)]
for point_id in point_selection:
if point_id not in point_ids_list:
point_selection.remove(point_id)
return point_selection
# Updates the table showing a list of the selected & filtered points.
@app.callback(
Output('filtered_point_selection_table', 'data'),
[Input('intermediate_value', 'children'),
Input('filtered_point_selection', 'children')]
)
def generate_filtered_point_selection_table(intermediate_value, filtered_point_selection):
# First check if any points have been selected.
if filtered_point_selection == []:
return []
else:
# Turn list of point_ids into a list of numbers instead of strings
point_selection = [int(x) for x in filtered_point_selection]
# Load the pre-filtered version of the dataframe.
df = pd.read_json(intermediate_value, orient='split')
# Reduce the dataframe using the point selection.
df = df[df.adres_id.isin(point_selection)]
# Transform True and False boolean values to strings.
df.woonfraude = df.woonfraude.replace({True: 'True', False: 'False'})
# Only use a selection of the columns.
df = df[SELECTED_COLUMNS]
# Create a table, with all positive woonfraude examples at the top.
columns = [{"name": i, "id": i} for i in df.columns]
data = df.to_dict('records')
return data
# TODO: CHANGE WHEN THE DOWNLOAD LINK IS UPDATED WITH NEW DATA.
# NOW THIS CODE BELOW IS RUN EVERY TIME A POINT IS (DE)SELECTED,
# THIS IS TERRIBLY INEFFICIENT. ACCEPTABLE FOR THE MVP, BUT SHOULD BE CHANGED.
# Creates a download link for the filtered_point_selection_table data.
@app.callback(
Output('download_selected_addresses_list', 'href'),
[Input('filtered_point_selection_table', 'data')])
def update_download_link(filtered_point_selection_table):
"""Updates the csv download link with the data in the filtered point selection table."""
if filtered_point_selection_table == []:
point_selection = []
else:
# Turn list of point_ids into a list of numbers instead of strings
point_selection = filtered_point_selection_table
# Convert to df, then to csv string, then return for downloading.
df = pd.DataFrame(point_selection)
csv_string = df.to_csv(index=False, encoding='utf-8', sep=';')
csv_string = "data:text/csv;charset=utf-8," + urllib.parse.quote(csv_string)
return csv_string
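# For reference, the href produced above is a data URI of the form
# "data:text/csv;charset=utf-8,<url-encoded csv>". A small sketch with a hypothetical
# two-row selection (column names and values are illustrative only):
def _example_csv_data_uri():
    from urllib.parse import quote
    demo = pd.DataFrame({"adres_id": [1, 2], "fraude_kans": [0.9, 0.1]})
    return "data:text/csv;charset=utf-8," + quote(demo.to_csv(index=False, sep=';'))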
# Test for our button output.
@app.callback(
Output('button_n_clicks', 'children'),
[Input('button', 'n_clicks')])
def show_number_of_button_clicks(button_n_clicks):
return str(button_n_clicks)
# Updates the stadsdeel split PIE chart.
@app.callback(
Output('stadsdeel_split', 'figure'),
[Input('intermediate_value', 'children')]
)
def make_stadsdeel_pie_chart(intermediate_value):
# Load the pre-filtered version of the dataframe.
df = pd.read_json(intermediate_value, orient='split')
# Create value counts per stadsdeel.
stadsdeel_value_counts = df.sdl_naam.value_counts().sort_index()
figure={
'data': [
go.Pie(
labels=stadsdeel_value_counts.index,
values=stadsdeel_value_counts.values
)
],
'layout': go.Layout(
height=300,
margin=go.layout.Margin(l=0, r=0, b=100, t=0, pad=0),
showlegend=True,
legend=dict(orientation='h', font={'size':10}),
paper_bgcolor=colors['container_background'],
)
}
return figure
# Updates the categorie split pie chart.
@app.callback(
Output('categorie_split', 'figure'),
[Input('intermediate_value', 'children')]
)
def make_categorie_pie_chart(intermediate_value):
# Load the pre-filtered version of the dataframe.
df = pd.read_json(intermediate_value, orient='split')
# Create value counts per categorie.
categorie_value_counts = df.categorie.value_counts().sort_index()
figure={
'data': [
go.Pie(
labels=categorie_value_counts.index,
values=categorie_value_counts.values
)
],
'layout': go.Layout(
height=300,
margin=go.layout.Margin(l=0, r=0, b=100, t=0, pad=0),
showlegend=True,
legend=dict(orientation='h', x=0, y=0, font={'size':10}),
paper_bgcolor=colors['container_background'],
)
}
return figure
'''
# Updates the stadsdeel split BAR chart.
@app.callback(
Output('stadsdeel_split', 'figure'),
[Input('intermediate_value', 'children')]
)
def make_stadsdeel_split_bar_chart(intermediate_value):
# Load the pre-filtered version of the dataframe.
df = pd.read_json(intermediate_value, orient='split')
# Create value counts per stadsdeel.
stadsdeel_value_counts = df.stadsdeel.value_counts(ascending=True)
x=stadsdeel_value_counts.values
y=stadsdeel_value_counts.index
percentages = [(val/sum(x))*100 for val in x]
# Create annotations for showing the percentages on top of the bars.
annotations = []
for num, p in enumerate(percentages):
annotation = dict(xref='x1',
yref='y1',
x=0.5,
y=num,
text=f"{p}%",
showarrow=False,
align='left'
)
annotations.append(annotation)
figure={
'data': [
go.Bar(
y=y,
x=x,
text=percentages,
marker=dict(
color='rgba(50, 171, 96, 0.6)',
line=dict(
color='rgba(50, 171, 96, 1.0)',
width=2),
),
showlegend=False,
orientation='h'
)
],
'layout': go.Layout(
height=200,
margin=go.layout.Margin(l=100, r=0, b=0, t=0, pad=0),
paper_bgcolor=colors['container_background'],
annotations=annotations
)
}
return figure
# Updates the categorie split BAR chart.
@app.callback(
Output('categorie_split', 'figure'),
[Input('intermediate_value', 'children')]
)
def make_categorie_split_bar_chart(intermediate_value):
# Load the pre-filtered version of the dataframe.
df = pd.read_json(intermediate_value, orient='split')
# Create value counts per categorie.
stadsdeel_value_counts = df.categorie.value_counts(ascending=True)
x=stadsdeel_value_counts.values
y=stadsdeel_value_counts.index
percentages = [(val/sum(x))*100 for val in x]
# Create annotations for showing the percentages on top of the bars.
annotations = []
for num, p in enumerate(percentages):
annotation = dict(xref='x1',
yref='y1',
x=0.5,
y=num,
text=f"{p}%",
showarrow=False,
align='left'
)
annotations.append(annotation)
figure={
'data': [
go.Bar(
y=y,
x=x,
text=percentages,
marker=dict(
color='rgba(50, 171, 96, 0.6)',
line=dict(
color='rgba(50, 171, 96, 1.0)',
width=2),
),
showlegend=False,
orientation='h'
)
],
'layout': go.Layout(
height=200,
margin=go.layout.Margin(l=100, r=0, b=0, t=0, pad=0),
paper_bgcolor=colors['container_background'],
annotations=annotations
)
}
return figure
'''
############################
## Proactief tab functies ##
############################
# @app.callback(
# Output('intermediate_value_proactief', 'children'),
# [Input('none_proactief', 'children')]
# )
# def create_data_selection(_):
# return df_proactief.to_json(date_format='iso', orient='split')
# Updates the intermediate data based on the dropdown selection.
@app.callback(
Output('intermediate_value_proactief', 'children'),
[Input('aantal_meldingen_rangeslider_proactief', 'value'),
Input('aantal_volwassenen_rangeslider_proactief', 'value'),
Input('aantal_m2_per_persoon_rangeslider_proactief', 'value'),
Input('stadsdeel_dropdown_proactief', 'value'),
Input('hotline_dropdown_proactief', 'value'),
Input('gebruikersdoel_dropdown_proactief', 'value'),
Input('profiel_dropdown_proactief', 'value')]
)
def create_data_selection(aantal_meldingen_range, aantal_volwassenen,
aantal_m2_per_persoon, selected_stadsdelen, is_hotline,
selected_gebruikersdoelen, selected_profielen):
# Create a copy of the original dataframe.
df_filtered = deepcopy(df_proactief)
# Filter the original dataframe by aantal meldingen.
min_meldingen = aantal_meldingen_range[0]
max_meldingen = aantal_meldingen_range[1]
df_filtered = df_filtered[(df_filtered.aantal_meldingen >= min_meldingen) & (df_filtered.aantal_meldingen <= max_meldingen)]
# Filter on number of adults
min_adults = aantal_volwassenen[0]
max_adults = aantal_volwassenen[1]
df_filtered = df_filtered[(df_filtered.aantal_volwassenen >= min_adults) & (df_filtered.aantal_volwassenen <= max_adults)]
# Filter on the amount of m2 per person.
min_m2_pp = aantal_m2_per_persoon[0]
max_m2_pp = aantal_m2_per_persoon[1]
df_filtered = df_filtered[(df_filtered.m2_per_persoon >= min_m2_pp) & (df_filtered.m2_per_persoon <= max_m2_pp)]
# Filter the dataframe by selected stadsdelen.
df_filtered = df_filtered[df_filtered.sdl_naam.isin(selected_stadsdelen)]
# Filter the dataframe based on whether the meldingen are hotline meldingen.
# To do this, first convert the is_hotline values (strings) to booleans for matching.
is_hotline = [{'True': True, 'False': False}.get(x, x) for x in is_hotline]
df_filtered = df_filtered[df_filtered.is_hotline.isin(is_hotline)]
# Filter the dataframe by selected gebruikersdoelen.
df_filtered = df_filtered[df_filtered.gebruikersdoel.isin(selected_gebruikersdoelen)]
# Filter the dataframe by selected profiles.
df_filtered = df_filtered[df_filtered.profiel.isin(selected_profielen)]
return df_filtered.to_json(date_format='iso', orient='split')
@app.callback(
Output('map_proactief', 'figure'),
[Input('intermediate_value_proactief', 'children')]
)
def plot_map(intermediate_value_proactief):
# Load the pre-filtered version of the dataframe.
df = pd.read_json(intermediate_value_proactief, orient='split')
"""
Preliminary script for looking at netcdf files and producing some trend estimates
These estimates can also be used for P03 climate estimation
"""
#==============================================================================
__title__ = "Global Climate Trends"
__author__ = "<NAME>"
__version__ = "v1.0(13.02.2019)"
__email__ = "<EMAIL>"
#==============================================================================
# +++++ Check the paths and set ex path to fireflies folder +++++
import os
import sys
if not os.getcwd().endswith("fireflies"):
if "fireflies" in os.getcwd():
p1, p2, _ = os.getcwd().partition("fireflies")
os.chdir(p1+p2)
else:
raise OSError(
"This script was called from an unknown path. CWD can not be set"
)
sys.path.append(os.getcwd())
#==============================================================================
# Import packages
import numpy as np
import pandas as pd
import argparse
import datetime as dt
from collections import OrderedDict
import warnings as warn
from netCDF4 import Dataset, num2date, date2num
from scipy import stats
import xarray as xr
from numba import jit
import bottleneck as bn
import scipy as sp
from scipy import stats
import statsmodels.stats.multitest as smsM
# Import plotting and colorpackages
import matplotlib.pyplot as plt
import matplotlib.colors as mpc
import matplotlib as mpl
import palettable
import seaborn as sns
import cartopy.crs as ccrs
import cartopy.feature as cpf
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
# Import debugging packages
import ipdb
print("numpy version : ", np.__version__)
print("pandas version : ", pd.__version__)
print("xarray version : ", xr.__version__)
#==============================================================================
def main():
# =========== Create the summary of the datasets to be analyised ==========
data= OrderedDict()
data["tas"] = ({
'fname':"./data/cli/1.TERRACLIMATE/TerraClimate_stacked_tmean_1958to2017_GIMMSremapbil_yearmean.nc",
'var':"tmean", "gridres":"GIMMS", "region":"Global", "Periods":["Annual"]
})
data["pre"] = ({
'fname':"./data/cli/1.TERRACLIMATE/TerraClimate_stacked_ppt_1958to2017_GIMMSremapbil_yearsum.nc",
'var':"ppt", "gridres":"GIMMS", "region":"Global", "Periods":["Annual"]
})
# ========== loop over each dataset ==========
for dt in data:
# ========== set up the params for the trend ==========
# st_yrs = [1960, 1970, 1982, 1990, 1999]
st_yrs = [1982]
# windows = [20, 15, 10, 5]
windows = [20]
# ========== Set the ploting and overwite params ==========
plot = False #True
# force = True
for period in data[dt]["Periods"]:
# ========== Perform the rolling window smoothing ==========
RollingWindow(
data[dt]["fname"], data[dt]["var"], "polyfit", windows,
period, data[dt]["gridres"], data[dt]["region"],
yr_start=1982, yr_end=2017, force=False, plot=plot)
RollingWindow(
data[dt]["fname"], data[dt]["var"], "scipyols", windows,
period, data[dt]["gridres"], data[dt]["region"],
yr_start=1982, yr_end=2017, force=False, plot=plot)
RollingWindow(
data[dt]["fname"], data[dt]["var"], "theilsen", windows,
period, data[dt]["gridres"], data[dt]["region"],
yr_start=1982, yr_end=2017, force=False, plot=plot)
# ========== Perform the uncorrected trend detection ==========
# trendmapper(
# data[dt]["fname"], data[dt]["var"], "polyfit",
# period, data[dt]["gridres"], data[dt]["region"],
# st_yrs, plot = plot)#, force=True)
# trendmapper(
# data[dt]["fname"], data[dt]["var"], "scipyols",
# period, data[dt]["gridres"], data[dt]["region"],
# st_yrs, plot = plot)#, force=True)
# trendmapper(
# data[dt]["fname"], data[dt]["var"], "theilsen",
# period, data[dt]["gridres"], data[dt]["region"],
# st_yrs, plot = plot)#, force=True)
# sys.exit()
# Reshape to an array with as many rows as years and as many columns as there are pixels
# ipdb.set_trace()
#==============================================================================
# ============================= Primary functions =============================
#==============================================================================
def plotmaker(ds_trend, kys, start_years, var, plot=True):
# ========== Build all the plots ==========
if not plot:
return True
# +++++ Plot number +++++
pn = 1
for styp in range(0, len(start_years)):
for num in range(0, len(kys)):
# ========== create the colormap ==========
cmap, vmin, vmax = cbvals(var, kys[num])
if any ([cm is None for cm in [cmap, vmin, vmax]]):
warn.warn("no colorbar exists for %s, skipping" % (kys[num]))
ipdb.set_trace()
# continue
print(styp, num)
ax = plt.subplot(len(start_years),len(kys), pn, projection=ccrs.PlateCarree())
ax.add_feature(cpf.BORDERS, linestyle='--', zorder=102)
ax.add_feature(cpf.LAKES, alpha=0.5, zorder=103)
ax.add_feature(cpf.RIVERS, zorder=104)
# add lat long linse
gl = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True,
linewidth=1, color='gray', alpha=0.5, linestyle='--')
gl.xlabels_top = False
gl.ylabels_right = False
if num == 0:
gl.xlabels_bottom = False
if not ((pn-1) % len(start_years)):
gl.ylabels_left = False
gl.xformatter = LONGITUDE_FORMATTER
gl.yformatter = LATITUDE_FORMATTER
# ========== Make the map ==========
# ipdb.set_trace()
ds_trend[kys[num]].isel(time=styp).plot(ax=ax, transform=ccrs.PlateCarree(),
cmap=cmap, vmin=vmin, vmax=vmax, cbar_kwargs={
"extend":"both"})#, "pad":0.0075,"fraction":0.125, "shrink":0.74}) #"fraction":0.05,
pn += 1
# ax.set_title=seasons[num]
# for vas, cl in zip(RFinfo.RF17.unique().tolist(), ['yx', "r*","k."]):
# ax.plot(RFinfo.lon.values, RFinfo.lat.values,
# "kx", markersize=4, transform=ccrs.PlateCarree())
# plt.subplots_adjust(
# top=0.98,
# bottom=0.02,
# left=0.038,
# right=0.989,
# hspace=0.05,
# wspace=0.037)
fig = plt.gcf()
# fig.set_size_inches(len(start_years)*3, len(kys)*6)
fig.set_size_inches(41, 20)
# plt.tight_layout()
plt.savefig("./%s_Testplotv2.png" % var)
# plt.close()
# ipdb.set_trace()
# plt.savefig("./Testplot.pdf")
# plt.show()
# plt.close()
ipdb.set_trace()
def RollingWindow(
fname, var, method, window, period, gridres, region,
yr_start=1982, yr_end = 2015, force = False, plot=True):
"""Function to perform a rolling window smoothing on the precipitation and climate data
args
fname: String
string of the netcdf to be opened
var: string
string of the variable name within the netcdf
window: list of int
the rolling-window lengths (in years) to smooth over
period: str
description of the accumulation period
gridres: str
description of the resolution of the grid
region: str
descript of the data region
yr_start
the first year to be included in trend analysis
yr_end
the last year to be included in trend analysis
force: bool
force the creation of new netcdf files
plot: bool
true makes plots
"""
# ========== Open the dataset ==========
ds = xr.open_dataset(fname)
print("Starting rolling window calculations for %s" % var)
# ========== build an output file name ==========
fout = (
'./results/netcdf/TerraClimate_%s_RollingMean_%s_%sto%d_%s%s.nc' % (
period, var, method, yr_end, region, gridres))
# ========== Test if a file alread exists ==========
if all([os.path.isfile(fout), not force]):
warn.warn("Loading existing file, force is needed to overwrite")
ds_trend = xr.open_dataset(fout)
kys = [n for n in ds_trend.data_vars]
else:
# ========== Create the global attributes ==========
global_attrs = GlobalAttributes(ds, var)
# ========== Create the rolling window means ==========
results = []
years = []
# ========== Pull out the data seasonality ==========
annual = ds[var]
# ========== Loop over each of the mooving windows ==========
for win in window:
rmean = annual.rolling(time=win).mean()
dst = rmean.sel(time=slice('%d-01-01' % yr_start, '%d-12-31' % yr_end))
# ========== Get the trend ==========
trends, kys = _fitvals(dst, method=method)
# ========== add a correction for multiple comparisons ==========
if "pvalue" in kys:
trends, kys = MultipleComparisons(trends, kys, alpha=0.10, MCmethod="fdr_bh")
results.append(trends)
years.append(yr_start-win)
# ========== convert data to netcdf format ==========
layers, encoding = dsmaker(ds, var, results, kys, years, method)
ds_trend = xr.Dataset(layers, attrs= global_attrs)
try:
print("Starting write of data")
ds_trend.to_netcdf(fout,
format = 'NETCDF4',
encoding = encoding,
unlimited_dims = ["time"])
print(".nc file created")
ipdb.set_trace()
except Exception as e:
print(e)
warn.warn(" \n something went wrong with the save, going interactive")
ipdb.set_trace()
#
if plot:
warn.warn("plotting has not been implemented in this function yet. Going interactive")
ipdb.set_trace()
def trendmapper(
fname, var, method, period, gridres, region,
start_years, endyr = 2015, fdpath="", force = False, plot=True):
ds = xr.open_dataset(fname)
# ========== Create the outfile name ==========
fout = './results/netcdf/TerraClimate_%s_%s_%sto%d_%s%s.nc' % (
period, var, method, endyr,region, gridres)
# ========== Check if the file already exists ==========
if all([os.path.isfile(fout), not force]):
warn.warn("Loading existing file, force is needed to overwrite")
ds_trend = xr.open_dataset(fout)
kys = [n for n in ds_trend.data_vars]
else:
results = []
# ========== Create the global attributes ==========
global_attrs = GlobalAttributes(ds, var)
if period == "OptimalAccumulated":
annual = ds[var]
else:
if period == "Annual":
man_annual = ds[var].groupby('time.year')
else:
# Grouping by the season
man_annual = ds[var].where(ds[var]['time.season'] == period).groupby('time.year')
# Account for the different variables
if var == "tmean":
annual = man_annual.mean(dim='time')
else:
annual = man_annual.sum(dim='time')
for styr in start_years:
if period == "OptimalAccumulated":
dst = annual.sel(time=slice('%d-01-01' % styr, '%d-12-31' % endyr))
else:
dst = annual.sel(year=slice('%d-01-01' % styr, '%d-12-31' % endyr))
trends, kys = _fitvals(dst, method=method)
# Correct for multiple comparisons
if "pvalue" in kys:
trends, kys = MultipleComparisons(trends, kys, alpha=0.10)
results.append(trends)
layers, encoding = dsmaker(ds, var, results, kys, start_years, method)
ds_trend = xr.Dataset(layers, attrs= global_attrs)
try:
print("Starting write of data")
ds_trend.to_netcdf(fout,
format = 'NETCDF4',
encoding = encoding,
unlimited_dims = ["time"])
except Exception as e:
print(e)
warn.warn(" \n something went wrong with the save, going interactive")
ipdb.set_trace()
# get the value
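# A toy illustration of the season/annual aggregation pattern used in trendmapper() above
# (synthetic monthly data; values, coordinates and the season choice are placeholders).
# da.time.dt.season is the explicit spelling of the ['time.season'] shorthand used above.
def _example_seasonal_accumulation():
    import numpy as np
    import pandas as pd
    import xarray as xr
    time = pd.date_range("2000-01-01", "2003-12-01", freq="MS")
    da = xr.DataArray(
        np.random.rand(len(time), 2, 2),
        coords={"time": time, "latitude": [50.0, 50.5], "longitude": [10.0, 10.5]},
        dims=["time", "latitude", "longitude"], name="ppt")
    # JJA precipitation accumulated per year, mirroring the non-annual ppt branch above
    return da.where(da.time.dt.season == "JJA").groupby("time.year").sum(dim="time")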
#==============================================================================
# ========================= Netcdf Creation Functions =========================
#==============================================================================
def GlobalAttributes(ds, var):
"""
Creates the global attributes for the netcdf file that is being written
these attributes come from :
https://www.unidata.ucar.edu/software/thredds/current/netcdf-java/metadata/DataDiscoveryAttConvention.html
args
ds: xarray ds
Dataset containing the information being interpreted
var: str
name of the variable
returns:
attributes: an OrderedDict containing the attribute information
"""
# ========== Create the ordered dictionary ==========
attr = OrderedDict()
# fetch the references for my publications
# pubs = puplications()
# ========== Fill the Dictionary ==========
# ++++++++++ Highly recomended ++++++++++
attr["title"] = "Trend in Climate (%s)" % (var)
attr["summary"] = "Annual and season trends in %s" % var
attr["Conventions"] = "CF-1.7"
# ++++++++++ Data Provinance ++++++++++
attr["history"] = "%s: Netcdf file created using %s (%s):%s by %s" % (
str(pd.Timestamp.now()), __title__, __file__, __version__, __author__)
attr["history"] += ds.history
attr["creator_name"] = __author__
attr["creator_url"] = "ardenburrell.com"
attr["creator_email"] = __email__
attr["institution"] = "University of Leicester"
attr["date_created"] = str(pd.Timestamp.now())
# ++++++++++ Netcdf Summary infomation ++++++++++
attr["time_coverage_start"] = str(dt.datetime(ds['time.year'].min(), 1, 1))
attr["time_coverage_end"] = str(dt.datetime(ds['time.year'].max() , 12, 31))
return attr
def dsmaker(ds, var, results, keys, start_years, method):
"""
Build a summary of relevant parameters
args
ds: xarray ds
Dataset containing the information being interpreted
var: str
name of the variable
return
ds xarray dataset
"""
# sys.exit()
# date = [dt.datetime(ds['time.year'].max() , 12, 31)]
times = OrderedDict()
tm = [dt.datetime(yr , 12, 31) for yr in start_years]
times["time"] = pd.to_datetime(tm)
times["calendar"] = 'standard'
times["units"] = 'days since 1900-01-01 00:00'
times["CFTime"] = date2num(
tm, calendar=times["calendar"], units=times["units"])
dates = times["CFTime"]
try:
lat = ds.lat.values
lon = ds.lon.values
except AttributeError:
lat = ds.latitude.values
lon = ds.longitude.values
# dates = [dt.datetime(yr , 12, 31) for yr in start_years]
# ipdb.set_trace()
# ========== Start making the netcdf ==========
layers = OrderedDict()
encoding = OrderedDict()
# ========== loop over the keys ==========
try:
for pos in range(0, len(keys)):
# ipdb.set_trace()
if type(results[0]) == np.ndarray:
Val = results[pos][np.newaxis,:, :]
else:
# multiple variables
Val = np.stack([res[pos] for res in results])
ky = keys[pos]
# build xarray dataset
DA=xr.DataArray(Val,
dims = ['time', 'latitude', 'longitude'],
coords = {'time': dates,'latitude': lat, 'longitude': lon},
attrs = ({
'_FillValue':9.96921e+36,
'units' :"1",
'standard_name':ky,
'long_name':"%s %s" % (method, ky)
}),
)
DA.longitude.attrs['units'] = 'degrees_east'
DA.latitude.attrs['units'] = 'degrees_north'
DA.time.attrs["calendar"] = times["calendar"]
DA.time.attrs["units"] = times["units"]
layers[ky] = DA
encoding[ky] = ({'shuffle':True,
# 'chunksizes':[1, ensinfo.lats.shape[0], 100],
'zlib':True,
'complevel':5})
return layers, encoding
except Exception as e:
warn.warn("Code failed with: \n %s \n Going Interactive" % e)
ipdb.set_trace()
raise e
#===============================================================================
# ============================= Internal Functions =============================
#===============================================================================
def MultipleComparisons(trends, kys, alpha=0.10, MCmethod="fdr_by"):
"""
Takes the results of an existing trend detection approach and modifies them to
account for multiple comparisons.
args
trends: list
list of numpy arrays containing results of trend analysis
kys: list
list of what is in results
alpha: float
significance level passed on to the multiple-comparison correction
MCmethod: str
correction method name understood by statsmodels' multipletests (e.g. "fdr_bh")
"""
if MCmethod == "fdr_by":
print("Adjusting for multiple comparisons using Benjamini/Yekutieli")
elif MCmethod == "fdr_bh":
print("Adjusting for multiple comparisons using Benjamini/Hochberg")
else:
warn.warn("unknown MultipleComparisons method, Going Interactive")
ipdb.set_trace()
# ========== Locate the p values and reshape them into a 1d array ==========
# ++++++++++ Find the pvalues ++++++++++
index = kys.index("pvalue")
pvalue = trends[index]
isnan = np.isnan(pvalue)
# ++++++++++ pull out the non nan pvalues ++++++++++
# pvalue1d = pvalue.flatten()
pvalue1d = pvalue[~isnan]
# isnan1d = isnan.flatten()
# =========== Perform the MC correction ===========
pvalue_adj = smsM.multipletests(pvalue1d, method=MCmethod, alpha=alpha)
# ++++++++++ reformat the data into array ++++++++++
MCR = ["Significant", "pvalue_adj"]
for nm in MCR:
# make an empty array
re = np.zeros(pvalue.shape)
re[:] = np.NAN
if nm == "Significant":
re[~isnan] = pvalue_adj[MCR.index(nm)].astype(int).astype(float)
else:
re[~isnan] = pvalue_adj[MCR.index(nm)]
# +++++ add the significant and adjusted pvalues to trends+++++
trends.append(re)
kys.append(nm)
return trends, kys
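# A small, self-contained sketch of the adjustment performed above, using made-up
# p-values (the numbers are illustrative only). multipletests() returns the reject
# decisions and adjusted p-values that get written back into the trend layers as
# "Significant" and "pvalue_adj".
def _example_fdr_adjustment():
    import numpy as np
    import statsmodels.stats.multitest as smsM
    pvals = np.array([0.001, 0.02, 0.04, 0.30, 0.80])  # hypothetical per-pixel p-values
    reject, pvals_adj, _, _ = smsM.multipletests(pvals, method="fdr_bh", alpha=0.10)
    return reject, pvals_adj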
def cbvals(var, ky):
"""Function to store all the colorbar infomation i need """
cmap = None
vmin = None
vmax = None
if ky == "slope":
if var == "tmean":
vmax = 0.07
vmin = -0.07
cmap = mpc.ListedColormap(palettable.cmocean.diverging.Balance_20.mpl_colors)
elif var =="ppt":
vmin = -3.0
vmax = 3.0
cmap = mpc.ListedColormap(palettable.cmocean.diverging.Curl_20_r.mpl_colors)
elif ky == "pvalue":
cmap = mpc.ListedColormap(palettable.matplotlib.Inferno_20.hex_colors)
vmin = 0.0
vmax = 1.0
elif ky == "rsquared":
cmap = mpc.ListedColormap(palettable.matplotlib.Viridis_20.hex_colors)
vmin = 0.0
vmax = 1.0
# cmap =
elif ky == "intercept":
cmap = mpc.ListedColormap(palettable.cmocean.sequential.Ice_20_r.mpl_colors)
if var == "tmean":
# vmax = 0.07
# vmin = -0.07
# cmap = mpc.ListedColormap(palettable.cmocean.diverging.Balance_20.mpl_colors)
# ipdb.set_trace()
pass
elif var =="ppt":
vmin = 0
vmax = 1000
# cmap = mpc.ListedColormap(palettable.cmocean.diverging.Curl_20_r.mpl_colors)
return cmap, vmin, vmax
# @jit
def _fitvals(dvt, method="polyfit"):
"""
Takes the ds[var] and performs some form of regression on it
"""
vals = dvt.values
try:
years = pd.to_datetime(dvt.time.values).year
t0 = pd.Timestamp.now()
print("testing with %s from %d to %d starting at: %s" % (
method, | pd.to_datetime(dvt.time.values) | pandas.to_datetime |
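# --- Illustrative sketch (added; not part of the original, truncated function) ---
# One common way a "polyfit" branch like the one above is finished: fit a straight
# line through time at every grid cell by reshaping (time, lat, lon) to
# (time, lat*lon) and letting np.polyfit handle all columns at once. The names
# vals and years mirror the function above; everything else is an assumption
# about how such a fit is usually wired up (and it assumes no NaNs in vals).
def _example_gridded_polyfit(vals, years):
    import numpy as np
    nt = vals.shape[0]
    flat = vals.reshape(nt, -1)                     # (time, cell)
    slope, intercept = np.polyfit(years, flat, 1)   # vectorised over columns
    return slope.reshape(vals.shape[1:]), intercept.reshape(vals.shape[1:])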
import pandas as pd
from scipy import stats
from statsmodels.stats import multicomp as mc
def create_summary_table(data, plot_dims=[], summary_stats=[]):
if len(summary_stats) == 0:
summary_stats = ["mean", "std"]
tmp = data.copy(deep=True)
tmp = tmp.groupby(["Cluster"])[plot_dims].agg(summary_stats)
return tmp
def run_cluster_comps(data=None, ft_cols=None):
"""
Function to determine where statistically significant differences lie between the clusters
:param data: DataFrame containing a "Cluster" column plus the feature columns
:param ft_cols: list of feature column names to test
:return:
"""
# Get the binary columns
bin_fts = [col for col in ft_cols if sorted(set(data[col])) == [0, 1]]
# Get the continuous columns
cont_fts = [col for col in ft_cols if col not in bin_fts]
# init the sig df and post hoc tests df
sig_results = {"Feature": list(), "p Val": list(), "Stat Test": list()}
post_hocs = pd.DataFrame()
# perform chi squared on the binary
for ft in bin_fts:
crosstab = | pd.crosstab(data["Cluster"], data[ft]) | pandas.crosstab |
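# --- Illustrative sketch (added; not part of the original, truncated loop) ---
# A guess at how the chi-squared comparison for one binary feature typically
# proceeds from the crosstab built above: scipy's chi2_contingency gives the
# p-value that is then appended to sig_results. The dict keys mirror the code
# above; the "Stat Test" label is an assumed convention, not the author's.
def _example_chi2_for_feature(data, ft, sig_results):
    import pandas as pd
    from scipy import stats
    crosstab = pd.crosstab(data["Cluster"], data[ft])
    chi2, p_val, dof, expected = stats.chi2_contingency(crosstab)
    sig_results["Feature"].append(ft)
    sig_results["p Val"].append(p_val)
    sig_results["Stat Test"].append("chi-squared")
    return sig_results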
from sklearn.model_selection import train_test_split
import os
import shutil
import zipfile
import logging
import pandas as pd
import glob
from tqdm import tqdm
from pathlib import Path
from ..common import Common
from ..util import read_img, get_img_dim
from .dataset import Dataset
# All dataset parsers inherit from Dataset.
class LISA_TL(Dataset):
def __init__(self, lisats_config: dict, config: Common):
"""
lisats_config: dict
Dict of values loaded from the dataset.yaml file
expected keys:
[RAW_FOLDER] -> root path of the raw dataset .zips
[ZIP] -> name of the zip file, with .zip
[INIT_FOLDER] -> where to extract the cleaned files
[PREPEND] -> prefix for all .txt and .jpg files outputted
config: Common
A Common object created from loaded yaml files with the configs.
"""
# path to raw file (absolute)
self.raw_path = config.root + lisats_config["RAW_FOLDER"] + lisats_config["ZIP"]
# path to initialization folder (absolute)
self.init_path = config.init_folder + lisats_config["INIT_FOLDER"]
self.annot_file = self.init_path + "allAnnotations.csv"
self.prepend = lisats_config["PREPEND"]
self.config = config
# ===============================================
# initializes dataset into intermediary unzipped and cleaned state.
def init_dataset(self):
"""
Unzips the LISA TL archive into the intermediary INIT_FOLDER and
cleans up files from the unzip to minimize disk space.
"""
assert os.path.isfile(self.raw_path) # makes sure that the zip actually exists.
print("Started initializing LISATL!")
# make subfolder inside INIT_FOLDER path.
os.makedirs(self.init_path, exist_ok=True)
# unzips file into directory
with zipfile.ZipFile(self.raw_path, "r") as zip_ref:
# zip_ref.extractall(self.init_path)
for member in tqdm(zip_ref.infolist(), desc='Extracting '):
zip_ref.extract(member, self.init_path)
# rename nightTrain, dayTrain to nightTraining, dayTraining (match annotation file)
shutil.move(self.init_path+"nightTrain/nightTrain", self.init_path+"nightTraining")
shutil.move(self.init_path+"dayTrain/dayTrain", self.init_path+"dayTraining")
# delete some of the unnecessary folders.
delete_folders = ["sample-nightClip1/", "sample-dayClip6/", "nightSequence1", "nightSequence2", "daySequence1", "daySequence2", "nightTrain", "dayTrain"]
for folder in delete_folders:
shutil.rmtree(self.init_path + folder)
# read in all day annotations
total_day_df = []
for dayClip in tqdm([x for x in Path(self.init_path+"Annotations/Annotations/dayTrain/").glob('**/*') if x.is_dir()]):
path = os.path.join(dayClip, "frameAnnotationsBOX.csv")
total_day_df.append( | pd.read_csv(path, sep=";") | pandas.read_csv |
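# --- Illustrative sketch (added; not part of the original, truncated method) ---
# A plausible continuation of the loop above: once every dayClip's
# frameAnnotationsBOX.csv has been appended to total_day_df, the per-clip
# frames are usually combined into one annotation table. The concat step is an
# assumption about the author's intent, not original code.
def _example_combine_clip_annotations(frames):
    import pandas as pd
    return pd.concat(frames, ignore_index=True)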
# -*- coding: utf-8 -*-
"""Add model years to an existing Scenario."""
# Sections of the code:
#
# I. Required python packages are imported
# II. Generic utilities for dataframe manipulation
# III. The main function, add_year()
# IV. Function add_year_set() for adding and modifying the sets
# V. Function add_year_par() for copying and modifying each parameter
# VI. Two utility functions, interpolate_1d() and interpolate_2d(), for
# calculating missing values
# %% I) Importing required packages
import numpy as np
import pandas as pd
# %% II) Utility functions for dataframe manipulation
def intpol(y1, y2, x1, x2, x):
"""Interpolate between (*x1*, *y1*) and (*x2*, *y2*) at *x*.
Parameters
----------
y1, y2 : float or pd.Series
x1, x2, x : int
"""
if x2 == x1 and y2 != y1:
print('>>> Warning <<<: No difference between x1 and x2, '
'returned empty!!!')
return []
elif x2 == x1 and y2 == y1:
return y1
else:
y = y1 + ((y2 - y1) / (x2 - x1)) * (x - x1)
return y
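# --- Illustrative example (added; not in the original module) ---
# intpol() is plain linear interpolation between (x1, y1) and (x2, y2):
# e.g. a value of 10 in 2020 and 20 in 2030 gives 15 in 2025.
def _example_intpol():
    assert intpol(10, 20, 2020, 2030, 2025) == 15.0
    return intpol(10, 20, 2020, 2030, 2025)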
def slice_df(df, idx, level, locator, value):
"""Slice a MultiIndex DataFrame and set a value to a specific level.
Parameters
----------
df : pd.DataFrame
idx : list of indices
level: str
locator : list
value : int or str
"""
if locator:
df = df.reset_index().loc[df.reset_index()[level].isin(locator)].copy()
else:
df = df.reset_index().copy()
if value:
df[level] = value
return df.set_index(idx)
def mask_df(df, index, count, value):
"""Create a mask for removing extra values from *df*."""
df.loc[index, df.columns > (df.loc[[index]].notnull().cumsum(
axis=1) == count).idxmax(axis=1).values[0]] = value
def unit_uniform(df):
"""Make units in *df* uniform."""
column = [x for x in df.columns if x in ['commodity', 'emission']]
if column:
com_list = set(df[column[0]])
for com in com_list:
df.loc[df[column[0]] == com, 'unit'] = df.loc[
df[column[0]] == com, 'unit'].mode()[0]
else:
df['unit'] = df['unit'].mode()[0]
return df
# %% III) The main function
def add_year(sc_ref, sc_new, years_new, firstyear_new=None, lastyear_new=None,
macro=False, baseyear_macro=None, parameter='all', region='all',
rewrite=True, unit_check=True, extrapol_neg=None,
bound_extend=True):
"""Add years to *sc_ref* to produce *sc_new*.
:meth:`add_year` does the following:
1. calls :meth:`add_year_set` to add and modify required sets.
2. calls :meth:`add_year_par` to add new years and modifications to each
parameter if needed.
Parameters
-----------
sc_ref : ixmp.Scenario
Reference scenario.
sc_new : ixmp.Scenario
New scenario.
years_new : list of int
New years to be added.
firstyear_new : int, optional
New first model year for the new scenario.
lastyear_new : int, optional
New last model year for the new scenario.
macro : bool
Add new years to parameters of the MACRO model.
baseyear_macro : int
New base year for the MACRO model.
parameter: list of str or 'all'
Parameters for adding new years.
rewrite: bool
Permit rewriting a parameter in new scenario when adding new years.
unit_check: bool
Harmonize the units for each commodity, if there is inconsistency
across model years.
extrapol_neg: float
When extrapolation produces negative values, replace with a multiple of
the value for the previous timestep.
bound_extend: bool
Duplicate data from the previous timestep when there is only one data
point for interpolation (e.g., permitting the extension of a bound to
2025, when there is only one value in 2020).
"""
# III.A) Adding sets and required modifications
years_new = sorted([x for x in years_new if str(x)
not in set(sc_ref.set('year'))])
add_year_set(sc_ref, sc_new, years_new, firstyear_new, lastyear_new,
baseyear_macro)
# -------------------------------------------------------------------------
# III.B) Adding parameters and calculating the missing values for the
# additional years
if parameter in ('all', ['all']):
par_list = sorted(sc_ref.par_list())
elif isinstance(parameter, list):
par_list = parameter
elif isinstance(parameter, str):
par_list = [parameter]
else:
print('Parameters should be defined in a list of strings or as'
' a single string!')
if 'technical_lifetime' in par_list:
par_list.insert(0, par_list.pop(par_list.index('technical_lifetime')))
if region in ('all', ['all']):
reg_list = sc_ref.set('node').tolist()
elif isinstance(region, list):
reg_list = region
elif isinstance(region, str):
reg_list = [region]
else:
print('Regions should be defined in a list of strings or as'
' a single string!')
# List of parameters to be ignored (even not copied to the new
# scenario)
par_ignore = ['duration_period']
par_list = [x for x in par_list if x not in par_ignore]
if not macro:
par_macro = ['demand_MESSAGE', 'price_MESSAGE', 'cost_MESSAGE',
'gdp_calibrate', 'historical_gdp', 'MERtoPPP', 'kgdp',
'kpvs', 'depr', 'drate', 'esub', 'lotol', 'p_ref', 'lakl',
'prfconst', 'grow', 'aeei', 'aeei_factor', 'gdp_rate']
par_list = [x for x in par_list if x not in par_macro]
if not sc_new.set('cat_year', {'type_year': 'firstmodelyear'}).empty:
firstyear_new = sc_new.set('cat_year',
{'type_year': 'firstmodelyear'})['year']
else:
firstyear_new = min([int(x) for x in sc_new.set('year').tolist()])
if not sc_ref.set('cat_year', {'type_year': 'firstmodelyear'}).empty:
firstyear_ref = sc_ref.set('cat_year',
{'type_year': 'firstmodelyear'})['year']
else:
firstyear_ref = firstyear_new
for parname in par_list:
# For historical parameters extrapolation permitted (e.g., from
# 2010 to 2015)
if 'historical' in parname:
extrapol = True
yrs_new = [x for x in years_new if x < int(firstyear_new)]
elif int(firstyear_ref) > int(firstyear_new):
extrapol = True
yrs_new = years_new
else:
extrapol = False
yrs_new = years_new
if 'bound' in parname:
bound_ext = bound_extend
else:
bound_ext = True
year_list = [x for x in sc_ref.idx_sets(parname) if 'year' in x]
if len(year_list) == 2 or parname in ['land_output']:
# The loop over "node" is only for reducing the size of tables
for node in reg_list:
add_year_par(sc_ref, sc_new, yrs_new, parname, [node],
firstyear_new, extrapol, rewrite, unit_check,
extrapol_neg, bound_ext)
else:
add_year_par(sc_ref, sc_new, yrs_new, parname, reg_list,
firstyear_new, extrapol, rewrite, unit_check,
extrapol_neg, bound_ext)
sc_new.set_as_default()
print('> All required parameters were successfully '
'added to the new scenario.')
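# --- Illustrative usage sketch (added; not part of the original module) ---
# How add_year() is typically invoked: clone an existing ixmp scenario and pass
# both versions in. The platform, model, and scenario names below are
# hypothetical placeholders, not values from this module.
def _example_add_year_usage():
    import ixmp
    mp = ixmp.Platform()
    sc_ref = ixmp.Scenario(mp, model="my_model", scenario="baseline")
    sc_new = sc_ref.clone(model="my_model", scenario="baseline_more_years",
                          keep_solution=False)
    add_year(sc_ref, sc_new, years_new=[2025, 2035, 2045])
    return sc_new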
# %% Submodules needed for running the main function
# IV) Adding new years to sets
def add_year_set(sc_ref, sc_new, years_new, firstyear_new=None,
lastyear_new=None, baseyear_macro=None):
"""Add new years to sets.
:meth:`add_year_set` adds additional years to an existing scenario, by
starting to make a new scenario from scratch. After modification of the
year-related sets, all other sets are copied from *sc_ref* to *sc_new*.
See :meth:`add_year` for parameter descriptions.
"""
# IV.A) Treatment of the additional years in the year-related sets
# A.1. Set - year
yrs_old = list(map(int, sc_ref.set('year')))
horizon_new = sorted(yrs_old + years_new)
sc_new.add_set('year', [str(yr) for yr in horizon_new])
# A.2. Set _ type_year
yr_typ = sc_ref.set('type_year').tolist()
sc_new.add_set('type_year', sorted(yr_typ + [str(yr) for yr in years_new]))
# A.3. Set _ cat_year
yr_cat = sc_ref.set('cat_year')
# A.4. Changing the first year if needed
if firstyear_new:
if not yr_cat.loc[yr_cat['type_year'] == 'firstmodelyear'].empty:
yr_cat.loc[yr_cat['type_year'] == 'firstmodelyear',
'year'] = firstyear_new
else:
yr_cat.loc[len(yr_cat.index)] = ['firstmodelyear', firstyear_new]
if lastyear_new:
if not yr_cat.loc[yr_cat['type_year'] == 'lastmodelyear'].empty:
yr_cat.loc[yr_cat['type_year'] == 'lastmodelyear',
'year'] = lastyear_new
else:
yr_cat.loc[len(yr_cat.index)] = ['lastmodelyear', lastyear_new]
# A.5. Changing the base year and initialization year of macro if a new
# year specified
if baseyear_macro:
if not yr_cat.loc[yr_cat['type_year'] == 'baseyear_macro',
'year'].empty:
yr_cat.loc[yr_cat['type_year'] == 'baseyear_macro',
'year'] = baseyear_macro
if not yr_cat.loc[yr_cat['type_year'] == 'initializeyear_macro',
'year'].empty:
yr_cat.loc[yr_cat['type_year'] == 'initializeyear_macro',
'year'] = baseyear_macro
yr_pair = []
for yr in years_new:
yr_pair.append([yr, yr])
yr_pair.append(['cumulative', yr])
yr_cat = yr_cat.append(pd.DataFrame(yr_pair,
columns=['type_year', 'year']),
ignore_index=True
).sort_values('year').reset_index(drop=True)
# A.6. Changing the cumulative years based on the new first model year
if 'firstmodelyear' in set(yr_cat['type_year']):
firstyear_new = int(yr_cat.loc[yr_cat['type_year'] == 'firstmodelyear',
'year'])
yr_cat = yr_cat.drop(yr_cat.loc[(yr_cat['type_year'] == 'cumulative'
) & (yr_cat['year'] < firstyear_new)
].index)
sc_new.add_set('cat_year', yr_cat)
# IV.B) Copying all other sets
set_list = [s for s in sc_ref.set_list() if 'year' not in s]
# Sets with one index set
index_list = [x for x in set_list if not isinstance(sc_ref.set(x),
pd.DataFrame)]
for set_name in index_list:
if set_name not in sc_new.set_list():
sc_new.init_set(set_name, idx_sets=None, idx_names=None)
sc_new.add_set(set_name, sc_ref.set(set_name).tolist())
# The rest of the sets
for set_name in [x for x in set_list if x not in index_list]:
new_set = [x for x in sc_ref.idx_sets(set_name
) if x not in sc_ref.set_list()]
if set_name not in sc_new.set_list() and not new_set:
sc_new.init_set(set_name,
idx_sets=sc_ref.idx_sets(set_name),
idx_names=sc_ref.idx_names(set_name))
sc_new.add_set(set_name, sc_ref.set(set_name))
sc_new.commit('sets added!')
print('> All the sets updated and added to the new scenario.')
# %% V) Adding new years to parameters
def add_year_par(sc_ref, sc_new, yrs_new, parname, reg_list, firstyear_new,
extrapolate=False, rewrite=True, unit_check=True,
extrapol_neg=None, bound_extend=True):
"""Add new years to parameters.
This function adds additional years to a parameter. The value of the
parameter for additional years is calculated mainly by interpolating and
extrapolating data from existing years.
See :meth:`add_year` for parameter descriptions.
"""
# V.A) Initialization and checks
par_list_new = sc_new.par_list()
idx_names = sc_ref.idx_names(parname)
horizon = sorted([int(x) for x in list(set(sc_ref.set('year')))])
node_col = [x for x in idx_names if x in ['node', 'node_loc', 'node_rel']]
year_list = [x for x in idx_names if x in ['year', 'year_vtg', 'year_act',
'year_rel']]
if parname not in par_list_new:
sc_new.check_out()
sc_new.init_par(parname, idx_sets=sc_ref.idx_sets(parname),
idx_names=sc_ref.idx_names(parname))
sc_new.commit('New parameter initiated!')
if node_col:
par_old = sc_ref.par(parname, {node_col[0]: reg_list})
par_new = sc_new.par(parname, {node_col[0]: reg_list})
sort_order = [node_col[0], 'technology',
'commodity', 'mode', 'emission'] + year_list
nodes = par_old[node_col[0]].unique().tolist()
else:
par_old = sc_ref.par(parname)
par_new = sc_new.par(parname)
sort_order = ['technology', 'commodity'] + year_list
nodes = ['N/A']
if not par_new.empty and not rewrite:
print('> Parameter "' + parname + '" already has data in new scenario'
' and left unchanged for node/s: {}.'.format(reg_list))
return
if par_old.empty:
print('> Parameter "' + parname + '" is empty in reference scenario'
' for node/s: {}!'.format(reg_list))
return
# Sorting the data to make it ready for dataframe manipulation
sort_order = [x for x in sort_order if x in idx_names]
if sort_order:
par_old = par_old.sort_values(sort_order).reset_index(drop=True)
rem_idx = [x for x in par_old.columns if x not in sort_order]
par_old = par_old.reindex(columns=sort_order + rem_idx)
sc_new.check_out()
if not par_new.empty and rewrite:
print('> Parameter "' + parname + '" is being removed from new'
' scenario to be updated for node/s in {}...'.format(nodes))
sc_new.remove_par(parname, par_new)
# A uniform "unit" for values in different years
if 'unit' in par_old.columns and unit_check:
par_old = unit_uniform(par_old)
# ---------------------------------------------------------------------------
# V.B) Adding new years to a parameter based on time-related indexes
# V.B.1) Parameters with no time index
if len(year_list) == 0:
sc_new.add_par(parname, par_old)
sc_new.commit(parname)
print('> Parameter "' + parname + '" just copied to new scenario '
'since has no time-related entries.')
# V.B.2) Parameters with one index related to time
elif len(year_list) == 1:
year_col = year_list[0]
df = par_old.copy()
df_y = interpolate_1d(df, yrs_new, horizon, year_col, 'value',
extrapolate, extrapol_neg, bound_extend)
sc_new.add_par(parname, df_y)
sc_new.commit(' ')
print('> Parameter "{}" copied and new years'
' added for node/s: "{}".'.format(parname, nodes))
# V.B.3) Parameters with two indexes related to time (such as 'input')
elif len(year_list) == 2:
year_col = 'year_act'
year_ref = [x for x in year_list if x != year_col][0]
def f(x, i):
return x[i + 1] - x[i] > x[i] - x[i - 1]
year_diff = [x for x in horizon[1:-1] if f(horizon, horizon.index(x))]
print('> Parameter "{}" is being added for node/s'
' "{}"...'.format(parname, nodes))
# Flagging technologies that have lifetime for adding new timesteps
yr_list = [int(x) for x in set(sc_new.set('year')
) if int(x) > int(firstyear_new)]
min_step = min(np.diff(sorted(yr_list)))
par_tec = sc_new.par('technical_lifetime', {'node_loc': nodes})
# Technologies with lifetime bigger than minimum time interval
par_tec = par_tec.loc[par_tec['value'] > min_step]
df = par_old.copy()
if parname == 'relation_activity':
tec_list = []
else:
tec_list = [t for t in (set(df['technology'])
) if t in list(set(par_tec['technology']))]
df_y = interpolate_2d(df, yrs_new, horizon, year_ref, year_col,
tec_list, par_tec, 'value', extrapolate,
extrapol_neg, year_diff, bound_extend)
sc_new.add_par(parname, df_y)
sc_new.commit(parname)
print('> Parameter "{}" copied and new years added'
' for node/s: "{}".'.format(parname, nodes))
# %% VI) Required functions
def interpolate_1d(df, yrs_new, horizon, year_col, value_col='value',
extrapolate=False, extrapol_neg=None, bound_extend=True):
"""Interpolate data with one year dimension.
This function receives a parameter data as a dataframe, and adds new data
for the additional years by interpolation and extrapolation.
Parameters
----------
df : pandas.DataFrame
The dataframe of the parameter to which new years to be added.
yrs_new : list of int
New years to be added.
horizon: list of int
The horizon of the reference scenario.
year_col : str
The header of the column to which the new years should be added, e.g.
`'year_act'`.
value_col : str
The header of the column containing values.
extrapolate : bool
Allow extrapolation when a new year is outside the parameter years.
extrapol_neg : bool
Allow negative values obtained by extrapolation.
bound_extend : bool
Allow extrapolation of bounds for new years
"""
horizon_new = sorted(horizon + yrs_new)
idx = [x for x in df.columns if x not in [year_col, value_col]]
if not df.empty:
df2 = df.pivot_table(index=idx, columns=year_col, values=value_col)
# To sort the new years smaller than the first year for
# extrapolation (e.g. 2025 values are calculated first; then
# values of 2015 based on 2020 and 2025)
year_before = sorted([x for x in yrs_new if x < min(df2.columns
)], reverse=True)
if year_before and extrapolate:
for y in year_before:
yrs_new.insert(len(yrs_new), yrs_new.pop(yrs_new.index(y)))
for yr in yrs_new:
if yr > max(horizon):
extrapol = True
else:
extrapol = extrapolate
# a) If this new year greater than modeled years, do extrapolation
if yr > max(df2.columns) and extrapol:
if yr == horizon_new[horizon_new.index(max(df2.columns)) + 1]:
year_pre = max([x for x in df2.columns if x < yr])
if len([x for x in df2.columns if x < yr]) >= 2:
year_pp = max([x for x in df2.columns if x < year_pre])
df2[yr] = intpol(df2[year_pre], df2[year_pp],
year_pre, year_pp, yr)
if bound_extend:
df2[yr] = df2[yr].fillna(df2[year_pre])
df2[yr][np.isinf(df2[year_pre])] = df2[year_pre]
if not df2[yr].loc[(df2[yr] < 0) & (df2[year_pre] >= 0)
].empty and extrapol_neg:
df2.loc[(df2[yr] < 0) & (df2[year_pre] >= 0),
yr] = df2.loc[(df2[yr] < 0
) & (df2[year_pre] >= 0),
year_pre] * extrapol_neg
else:
df2[yr] = df2[year_pre]
# b) If the new year is smaller than modeled years, extrapolate
elif yr < min(df2.columns) and extrapol:
year_next = min([x for x in df2.columns if x > yr])
# To make sure the new year is not two steps smaller
cond = (year_next == horizon_new[horizon_new.index(yr) + 1])
if len([x for x in df2.columns if x > yr]) >= 2 and cond:
year_nn = min([x for x in df2.columns if x > year_next])
df2[yr] = intpol(df2[year_next], df2[year_nn],
year_next, year_nn, yr)
df2[yr][np.isinf(df2[year_next])] = df2[year_next]
if not df2[yr].loc[(df2[yr] < 0) & (df2[year_next] >= 0)
].empty and extrapol_neg:
df2.loc[(df2[yr] < 0) & (df2[year_next] >= 0), yr
] = df2.loc[(df2[yr] < 0
) & (df2[year_next] >= 0),
year_next] * extrapol_neg
elif bound_extend and cond:
df2[yr] = df2[year_next]
# c) Otherwise, do interpolation
elif yr > min(df2.columns) and yr < max(df2.columns):
year_pre = max([x for x in df2.columns if x < yr])
year_next = min([x for x in df2.columns if x > yr])
df2[yr] = intpol(df2[year_pre], df2[year_next],
year_pre, year_next, yr)
# Extrapolate for new years if the value exists for the
# previous year but not for the next years
# TODO: here is the place that should be changed if the
# new year should go to the time step before the existing one
if [x for x in df2.columns if x > year_next]:
year_nn = min([x for x in df2.columns if x > year_next])
df2[yr] = df2[yr].fillna(intpol(df2[year_next],
df2[year_nn], year_next,
year_nn, yr))
if not df2[yr].loc[(df2[yr] < 0) & (df2[year_next] >= 0)
].empty and extrapol_neg:
df2.loc[(df2[yr] < 0) & (df2[year_next] >= 0), yr
] = df2.loc[(df2[yr] < 0
) & (df2[year_next] >= 0),
year_next] * extrapol_neg
if bound_extend:
df2[yr] = df2[yr].fillna(df2[year_pre])
df2[yr][np.isinf(df2[year_pre])] = df2[year_pre]
df2 = pd.melt(df2.reset_index(), id_vars=idx,
value_vars=[x for x in df2.columns if x not in idx],
var_name=year_col, value_name=value_col
).dropna(subset=[value_col]).reset_index(drop=True)
df2 = df2.sort_values(idx).reset_index(drop=True)
else:
print('+++ WARNING: The submitted dataframe is empty, so returned'
' empty results!!! +++')
df2 = df
return df2
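# --- Illustrative example (added; not part of the original module) ---
# A tiny, self-contained demonstration of interpolate_1d(): a parameter known
# in 2020 and 2030 is filled in for 2025 by linear interpolation. The toy
# technology name and unit are made up for illustration only.
def _example_interpolate_1d():
    df = pd.DataFrame({'technology': ['wind', 'wind'],
                       'year': [2020, 2030],
                       'value': [10.0, 20.0],
                       'unit': ['GWa', 'GWa']})
    out = interpolate_1d(df, yrs_new=[2025], horizon=[2020, 2030],
                         year_col='year', value_col='value')
    return out  # contains a 2025 row with value 15.0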
# %% VI.B) Interpolating parameters with two dimensions related to time
def interpolate_2d(df, yrs_new, horizon, year_ref, year_col, tec_list, par_tec,
value_col='value', extrapolate=False, extrapol_neg=None,
year_diff=None, bound_extend=True):
"""Interpolate parameters with two dimensions related year.
This function receives a dataframe that has 2 time-related columns (e.g.,
"input" or "relation_activity"), and adds new data for the additonal years
in both time-related columns by interpolation and extrapolation.
Parameters
----------
df : pandas.DataFrame
The dataframe of the parameter to which new years to be added.
yrs_new : list of int
New years to be added.
horizon: list of int
The horizon of the reference scenario.
year_ref : str
The header of the first column to which the new years should be added,
e.g. `'year_vtg'`.
year_col : str
The header of the column to which the new years should be added, e.g.
`'year_act'`.
tec_list : list of str
List of technologies in the parameter ``technical_lifetime``.
par_tec : pandas.DataFrame
Parameter ``technical_lifetime``.
value_col : str
The header of the column containing values.
extrapolate : bool
Allow extrapolation when a new year is outside the parameter years.
extrapol_neg : bool
Allow negative values obtained by extrapolation.
year_diff : list of int
List of model years with different time intervals before and after them
bound_extend : bool
Allow extrapolation of bounds for new years based on one data point
"""
def idx_check(df1, df2):
return df1.loc[df1.index.isin(df2.index)]
if df.empty:
print('+++ WARNING: The submitted dataframe is empty, so'
' returned empty results!!! +++')
return df
df_tec = df.loc[df['technology'].isin(tec_list)]
idx = [x for x in df.columns if x not in [year_col, value_col]]
df2 = df.pivot_table(index=idx, columns=year_col, values='value')
df2_tec = df_tec.pivot_table(index=idx, columns=year_col, values='value')
# -------------------------------------------------------------------------
# First, changing the time interval for the transition period
# (e.g., year 2010 in old R11 model transits from 5 year to 10 year)
horizon_new = sorted(horizon + [x for x in yrs_new if x not in horizon])
def f(x, i):
return x[i + 1] - x[i] > x[i] - x[i - 1]
yr_diff_new = [x for x in horizon_new[1:-1] if f(horizon_new,
horizon_new.index(x))]
# Generating duration_period_sum matrix for masking
df_dur = pd.DataFrame(index=horizon_new[:-1], columns=horizon_new[1:])
for i in df_dur.index:
for j in [x for x in df_dur.columns if x > i]:
df_dur.loc[i, j] = j - i
# Adding data for new transition year
if yr_diff_new and tec_list and year_diff not in yr_diff_new:
yrs = [x for x in horizon if x <= yr_diff_new[0]]
year_next = min([x for x in df2.columns if x > yr_diff_new[0]])
df_yrs = slice_df(df2_tec, idx, year_ref, yrs, [])
if yr_diff_new[0] in df2.columns:
df_yrs = df_yrs.loc[~pd.isna(df_yrs[yr_diff_new[0]]), :]
df_yrs = df_yrs.append(slice_df(df2_tec, idx, year_ref,
[year_next], []),
ignore_index=False).reset_index()
df_yrs = df_yrs.sort_values(idx).set_index(idx)
for yr in sorted([x for x in list(set(df_yrs.reset_index()[year_ref])
) if x < year_next]):
yr_next = min([x for x in horizon_new if x > yr])
d = slice_df(df_yrs, idx, year_ref, [yr], [])
d_n = slice_df(df_yrs, idx, year_ref, [yr_next], yr)
if d_n[year_next].loc[~pd.isna(d_n[year_next])].empty:
if [x for x in horizon_new if x > yr_next]:
yr_nn = min([x for x in horizon_new if x > yr_next])
else:
yr_nn = yr_next
d_n = slice_df(df_yrs, idx, year_ref, [yr_nn], yr)
d_n = d_n.loc[d_n.index.isin(d.index), :]
d = d.loc[d.index.isin(d_n.index), :]
d[d.isnull() & d_n.notnull()] = d_n
df2.loc[df2.index.isin(d.index), :] = d
cond1 = (df_dur.index <= yr_diff_new[0])
cond2 = (df_dur.columns >= year_next)
subt = yr_diff_new[0] - horizon_new[horizon_new.index(yr_diff_new[0]
) - 1]
df_dur.loc[cond1, cond2] = df_dur.loc[cond1, cond2] - subt
# -------------------------------------------------------------------------
# Second, adding year_act of new years if year_vtg is in existing years
for yr in yrs_new:
if yr > max(horizon):
extrapol = True
else:
extrapol = extrapolate
# a) If this new year is greater than modeled years, do extrapolation
if yr > horizon_new[horizon_new.index(max(df2.columns))] and extrapol:
year_pre = max([x for x in df2.columns if x < yr])
year_pp = max([x for x in df2.columns if x < year_pre])
df2[yr] = intpol(df2[year_pre], df2[year_pp],
year_pre, year_pp, yr)
df2[yr][np.isinf(df2[year_pre].shift(+1))
] = df2[year_pre].shift(+1)
df2[yr] = df2[yr].fillna(df2[year_pre])
j = horizon_new.index(yr)
if yr - horizon_new[j - 1] >= horizon_new[j - 1
] - horizon_new[j - 2]:
df2[yr].loc[(pd.isna(df2[year_pre].shift(+1))
) & (~pd.isna(df2[year_pp].shift(+1)))] = np.nan
cond = (df2[yr] < 0) & (df2[year_pre].shift(+1) >= 0)
if not df2[yr].loc[cond].empty and extrapol_neg:
df2.loc[cond, yr] = df2.loc[cond, year_pre] * extrapol_neg
# b) Otherwise, do interpolation
elif yr > min(df2.columns) and yr < max(df2.columns):
year_pre = max([x for x in df2.columns if x < yr])
year_next = min([x for x in df2.columns if x > yr])
df2[yr] = intpol(df2[year_pre], df2[year_next],
year_pre, year_next, yr)
df2_t = df2.loc[df2_tec.index, :].copy()
# This part calculates the missing value if only the previous
# timestep has a value (and not the next)
if tec_list:
cond = (pd.isna(df2_t[yr])) & (~pd.isna(df2_t[year_pre]))
df2_t[yr].loc[cond] = intpol(df2_t[year_pre],
df2_t[year_next].shift(-1),
year_pre, year_next, yr)
# Treating technologies with phase-out in model years
if [x for x in df2.columns if x < year_pre]:
year_pp = max([x for x in df2.columns if x < year_pre])
cond1 = (pd.isna(df2_t[yr])) & (~pd.isna(df2_t[year_pre]))
cond2 = (pd.isna(df2_t[year_pre].shift(-1)))
df2_t[yr].loc[cond1 & cond2
] = intpol(df2_t[year_pre], df2_t[year_pp],
year_pre, year_pp, yr)
cond = (df2_t[yr] < 0) & (df2_t[year_pre] >= 0)
if not df2_t[yr].loc[cond].empty and extrapol_neg:
df2_t.loc[cond, yr] = df2_t.loc[cond, year_pre
] * extrapol_neg
df2.loc[df2_tec.index, :] = df2_t
df2[yr][np.isinf(df2[year_pre])] = df2[year_pre]
df2 = df2.reindex(sorted(df2.columns), axis=1)
# -------------------------------------------------------------------------
# Third, adding year_vtg of new years
for yr in yrs_new:
# a) If this new year is greater than modeled years, do extrapolation
if yr > max(horizon):
year_pre = horizon_new[horizon_new.index(yr) - 1]
year_pp = horizon_new[horizon_new.index(yr) - 2]
df_pre = slice_df(df2, idx, year_ref, [year_pre], yr)
df_pp = slice_df(df2, idx, year_ref, [year_pp], yr)
df_yr = intpol(df_pre, idx_check(df_pp, df_pre),
year_pre, year_pp, yr)
df_yr[np.isinf(df_pre)] = df_pre
# For those technologies with one value for each year
df_yr.loc[ | pd.isna(df_yr[yr]) | pandas.isna |
#!/usr/bin/env python
from glob import glob
import os
import numpy as np
import pandas as pd
from natsort import natsorted
def prepare_forex(asset, path):
if not os.path.exists(path + asset + "/h5"):
os.makedirs(path + asset + "/h5")
def dateparse(date, time):
return pd.to_datetime(date + time, format='%Y%m%d%H%M%S%f')
def process_data(file):
data = pd.read_csv(file, header=None, names=["Date", "Time", "Bid", "Ask"], index_col="datetime",
parse_dates={'datetime': ['Date', 'Time']}, date_parser=dateparse)
# Add the midquote
data["Midquote"] = (data["Bid"] + data["Ask"]) / 2
data.drop(["Bid", "Ask"], axis=1, inplace=True)
data = data.iloc[:, 0]
# Shift the index such that trading time is from 0-24h
idx_1 = data[:'2014-08-02'].index + pd.Timedelta('8h')
idx_2 = data['2014-08-03':].index + pd.Timedelta('6h')
data.index = idx_1.union(idx_2)
# Change the first and the last timestamp
def change_timestamp(x):
if len(x) > 0:
x[0] = x[0].replace(hour=0, minute=0, second=0, microsecond=0)
x[-1] = x[-1].replace(hour=23, minute=59, second=59, microsecond=999999)
return x
new_idx = data.index.to_series().groupby(pd.TimeGrouper("1d")).apply(change_timestamp)
data.index = new_idx
# Save the data to the disk
for day, data_day in data.groupby(pd.TimeGrouper("1d")):
if data_day.size > 0:
file = path + asset + "/h5/" + day.strftime("%Y-%m-%d") + ".h5"
data_day.to_hdf(file, "table")
# List all files and loop over them
file_list = natsorted(glob(path + asset + "/raw/*"))
for file in file_list:
process_data(file)
def prepare_nyse(asset, path):
if not os.path.exists(path + asset + "/h5"):
os.makedirs(path + asset + "/h5")
def dateparse(date, time):
time = float(time)
time_s = int(time)
f = time - time_s
m, s = divmod(time_s, 60)
h, m = divmod(m, 60)
return pd.to_datetime(date + str(h).zfill(2) + str(m).zfill(2) + str(s).zfill(2) + ("%.6f" % f)[2:],
format='%Y%m%d%H%M%S%f') # TODO: improve rounding of the fractional part
def process_data(file):
data = pd.read_csv(file, usecols=["Date", "Time", "Bid", "Ask"], index_col="datetime",
parse_dates={'datetime': ['Date', 'Time']}, date_parser=dateparse)
# Add the midquote
data["Midquote"] = (data["Bid"] + data["Ask"]) / 2
data.drop(["Bid", "Ask"], axis=1, inplace=True)
data = data.iloc[:, 0]
# Shift the index such that trading time is from 0-6:30h
data.index -= pd.to_timedelta("9.5h")
# Change the first and the last timestamp
def change_timestamp(x):
if len(x) > 0:
x[0] = x[0].replace(hour=0, minute=0, second=0, microsecond=0)
x[-1] = x[-1].replace(hour=6, minute=30, second=0, microsecond=0)
return x
new_idx = data.index.to_series().groupby(pd.TimeGrouper("1d")).apply(change_timestamp)
data.index = new_idx
# Save the data to the disk
for day, data_day in data.groupby(pd.TimeGrouper("1d")):
if data_day.size > 0:
file = path + asset + "/h5/" + day.strftime("%Y-%m-%d") + ".h5"
data_day.to_hdf(file, "table")
# List all files and loop over them
file_list = natsorted(glob(path + asset + "/raw/**/" + asset + "_quotes*.txt", recursive=True))
for file in file_list:
process_data(file)
def remove_bad_days_forex(asset, path):
file_list = glob(path + asset + "/" + "h5" + "/*")
for file in file_list:
print(file)
year, month, day = os.path.basename(file).replace(".h5", "").split("-")
# Kick Jan 1, Dec 25
if ((day == "01") and (month == "01")) or ((day == "25") and (month == "12")):
os.remove(file)
continue
# Check whether the trading time is less than 22:30h
data = pd.read_hdf(file, "table")
check = data.index[-2] - data.index[1] < pd.to_timedelta("22.5h")
if check:
os.remove(file)
def remove_bad_days_nyse(asset, path):
file_list = pd.Series(glob(path + asset + "/" + "h5" + "/*"))
ff = np.array([int(os.path.basename(f).replace(".h5", "").replace("-", "")) for f in file_list])
bad_days = np.array([20010608, 20010703, 20011123, 20011224,
20020705, 20020911, 20021129, 20021224,
20030703, 20031128, 20031224, 20031226,
20041126, 20051125, 20060703, 20061124,
20070703, 20071123, 20071224,
20080703, 20081128, 20081224,
20091127, 20091224,
20101126, 20111125,
20120703, 20121123, 20121224,
20130606, 20130703, 20131129, 20131224,
20140703, 20141128, 20141224,
20151127, 20151224,
20161125,
20170703, 20171124,
20180703, 20181123, 20181224,
20150128
])
flag = np.isin(ff, bad_days)
for file in file_list[flag]:
os.remove(file)
def create_index(asset_list, path, index_name):
if not os.path.exists(path + index_name + "/h5"):
os.makedirs(path + index_name + "/h5")
file_list = [pd.Series(glob(path + asset + "/" + "h5" + "/*")) for asset in asset_list]
file_list = | pd.concat(file_list) | pandas.concat |
#!/usr/bin/env python3
from os.path import join as pjoin
from os.path import isfile as os_isfile
import numpy as np
import pandas as pd
from scipy.stats import linregress as stats_linregress
from scipy.cluster import hierarchy as scipy_hierarchy
from sklearn import decomposition as skl_decomposition
from sklearn.model_selection import train_test_split as skl_train_test_split
import sklearn.cluster as skl_cluster
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib.ticker import AutoMinorLocator
from helpyr import logger
from helpyr import data_loading
from helpyr import kwarg_checker
from helpyr import figure_helpyr
from data_wrangling.omnipickle_manager import OmnipickleManager
import data_wrangling.global_settings as settings
# For reference
Qs_column_names = [
# Timing and meta data
#'elapsed-time sec', <- Calculate this column later
'timestamp', 'missing ratio', 'vel', 'sd vel', 'number vel',
'exp_time',
# Bedload transport masses (g)
'Bedload all', 'Bedload 0.5', 'Bedload 0.71', 'Bedload 1',
'Bedload 1.4', 'Bedload 2', 'Bedload 2.8', 'Bedload 4',
'Bedload 5.6', 'Bedload 8', 'Bedload 11.2', 'Bedload 16',
'Bedload 22', 'Bedload 32', 'Bedload 45',
# Grain counts
'Count all', 'Count 0.5', 'Count 0.71', 'Count 1', 'Count 1.4',
'Count 2', 'Count 2.8', 'Count 4', 'Count 5.6', 'Count 8',
'Count 11.2', 'Count 16', 'Count 22', 'Count 32', 'Count 45',
# Statistics
'D10', 'D16', 'D25', 'D50', 'D75', 'D84', 'D90', 'D95', 'Dmax'
]
gsd_column_names = [
# Stats
'Sigmag', 'Dg', 'La', 'D90', 'D50', 'D10', 'Fsx',
# Grain Size Fractions (counts)
'0.5', '0.71', '1', '1.4', '2', '2.8', '4',
'5.6', '8', '11.3', '16', '22.6', '32',
# Scan name (ex: 3B-f75L-t60-8m )
'scan_name',
]
## Saved PCA Data will have the following format
# pca_dict = {
# 'evectors' : evectors or {exp_code : evectors},
# 'evalues' : evalues or {exp_code : evalues},
# 'PCs' : PCs or {exp_code : PCs},
# 'keep_modes' : keep_modes or {exp_code : keep_modes}, # suggested k
# 'is_concat' : is_concat,
# }
class EOSC510Project:
def __init__(self, debug_mode=False):
# Start up logger
self.log_filepath = f"{settings.log_dir}/eosc510_project.txt"
self.logger = logger.Logger(self.log_filepath, default_verbose=True)
self.logger.begin_output("EOSC 510 project")
# Reload omnimanager
self.omnimanager = OmnipickleManager(self.logger)
self.omnimanager.restore()
# Initialize variables
self.pca_data = None
self.rolling_missing_tolerance = 0.8
self.p1_links = {} #{exp_code : p1_agglom_links <- note: never changes
# Setup plotting variables
self.stable_subplots = figure_helpyr.StableSubplots()
self.figure_saver = figure_helpyr.FigureSaver(
figure_extension='png',
logger = self.logger,
debug_mode = debug_mode,
figure_root_dir = settings.figure_destination,
figure_sub_dir = 'eosc510',
)
self.save_figure = self.figure_saver.save_figure # for older uses
def load_raw_data(self):
# Reload lighttable (Qs) data from files into omnipickle
reload_kwargs = {
'add_feed' : True,
#'cols_to_keep' :
}
self.omnimanager.reload_Qs_data(**reload_kwargs)
# Load all Qs data into a local variable
self.Qs_data = {}
for exp_code in self.omnimanager.experiments.keys():
experiment = self.omnimanager.experiments[exp_code]
self.Qs_data[exp_code] = experiment.accumulated_data
self.clean_Qs_data()
# Load surface gsd data
#self.omnimanager.reload_gsd_data()
def clean_Qs_data(self):
self.logger.write_blankline()
self.outliers = {}
self.prominence = {}
self.p_window = 5 # Note, this includes the center
self.p_tolerance = 250
self.logger.write(f"Note: Outliers are currently calculated as " +\
f"any point +-{self.p_tolerance} g/s from the average " +\
f"of the neighboring {self.p_window-1} nodes (prominence)")
for exp_code in self.Qs_data.keys():
#for exp_code in ['1A']:
self.logger.write(f"Cleaning {exp_code}")
experiment = self.omnimanager.experiments[exp_code]
raw_pd_data = self.Qs_data[exp_code]
bedload_all_pd = raw_pd_data['Bedload all']
exp_time_hrs = raw_pd_data['exp_time_hrs'].values
# Find prominence
# Measure of how different the target node is from the average of
# the adjacent cells. I can't find a rolling window type that
# excludes the center node (kinda like image processing kernels
# can) so I can average only the adjacent nodes, hence the more
# complicated prominence equation.
p_roller = bedload_all_pd.rolling(
self.p_window, min_periods=1, center=True)
p_sum = p_roller.sum()
prominence = \
(1 + 1/(self.p_window-1)) * bedload_all_pd - \
p_sum / (self.p_window - 1)
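# Added worked example (comments only): with p_window = 5 and a centred
# window [2, 4, 100, 4, 2] around a spike of 100, the rolling sum is 112,
# so prominence = (1 + 1/4)*100 - 112/4 = 125 - 28 = 97, i.e. the spike
# sits ~97 g/s above the mean of its four neighbours ((2 + 4 + 4 + 2)/4 = 3).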
# Identify outliers
very_pos = prominence > self.p_tolerance
very_neg = prominence < -self.p_tolerance
is_outlier = very_pos | very_neg
outliers_idx = np.where(is_outlier)[0]
# Set outliers to NaN
#outliers = bedload_all_pd[is_outlier].copy()
#bedload_all_pd.loc[is_outlier] = np.NaN
#
## Some debugging plots
#plt.figure()
#plt.scatter(exp_time_hrs, bedload_all_pd, color='b')
#plt.scatter(exp_time_hrs[is_outlier], outliers, color='r')
#plt.title(f"bedload all {exp_code}")
#plt.figure()
#plt.scatter(exp_time_hrs, prominence)
#plt.hlines([-250, 250], 0, 8)
#plt.title(f"prominence {exp_code}")
#plt.show()
#assert(False)
#if False:
# Remove outliers
# Note this changes self.Qs_data
bedload_columns = [
'Bedload all', 'Bedload 0.5', 'Bedload 0.71', 'Bedload 1',
'Bedload 1.4', 'Bedload 2', 'Bedload 2.8', 'Bedload 4',
'Bedload 5.6', 'Bedload 8', 'Bedload 11.2', 'Bedload 16',
'Bedload 22', 'Bedload 32', 'Bedload 45',
]
#outliers_bedload = bedload_all_pd.iloc[outliers_idx].copy().values
# Save data
self.prominence[exp_code] = prominence.copy()
self.prominence[exp_code].index = exp_time_hrs
outliers = bedload_all_pd[is_outlier].copy()
outliers.index = exp_time_hrs[is_outlier]
#outliers.reset_index(drop=True, inplace=True)
self.outliers[exp_code] = outliers
col_idx = np.where(np.in1d(raw_pd_data.columns, bedload_columns))[0]
raw_pd_data.iloc[outliers_idx, col_idx] = np.nan
def _plot_outliers_prominence(self, save_plots=True):
# Plots the outliers and prominence for each experiment. Assumes
# clean_Qs_data has been called and that it uses the prominence method
self.logger.write_blankline()
self.logger.write("Plotting outliers")
fig, axs2D = plt.subplots(nrows=4, ncols=4, sharex=True, figsize=(12,8),
gridspec_kw={'height_ratios' : [2,1.5,2,1.5],},
)
axs = axs2D.flatten()
exp_codes = sorted(self.Qs_data.keys())
p_tolerance = self.p_tolerance
d_min, d_max = [0, 1]
p_min, p_max = [-2*p_tolerance, 2*p_tolerance]
data_color = '#1f77b4' # faded blue
outlier_color = 'r'
for ax_id, exp_code in enumerate(exp_codes):
# Get the right data and prominence axes
ax_id += 4 if ax_id > 3 else 0
d_ax, p_ax = axs[ax_id], axs[ax_id + 4]
# Get the data
raw_pd_data = self.Qs_data[exp_code]
outliers = self.outliers[exp_code]
prominence = self.prominence[exp_code]
# Plot the bedload data that will be kept
bedload_all = raw_pd_data['Bedload all'].values
exp_time_hrs = raw_pd_data['exp_time_hrs'].values
d_ax.scatter(exp_time_hrs, bedload_all, label='Keep',
marker='.', c=data_color)
# Plot the outlier data that will not be kept
outlier_bedload = outliers.values
outlier_hrs = outliers.index
d_ax.scatter(outlier_hrs, outlier_bedload, label='Outlier',
marker='o', color=outlier_color, facecolors='none')
# Plot the prominence values
is_outlier = prominence.index.isin(outlier_hrs)
is_keeper = np.logical_not(is_outlier)
p_ax.scatter(prominence.index[is_keeper],
prominence.values[is_keeper], label='Keep',
marker='.', c=data_color)
p_ax.scatter(prominence.index[is_outlier],
prominence.values[is_outlier], label='Outlier',
marker='o', color=outlier_color, facecolors='none')
p_ax.hlines([-p_tolerance, p_tolerance], 0, 8)
# Get the data and prominence plot limits
d_ylim = d_ax.get_ylim()
d_min = d_min if d_min < d_ylim[0] else d_ylim[0]
d_max = d_max if d_max > d_ylim[1] else d_ylim[1]
p_ylim = p_ax.get_ylim()
p_min = p_min if p_min < p_ylim[0] else p_ylim[0]
p_max = p_max if p_max > p_ylim[1] else p_ylim[1]
for ax_id, exp_code in enumerate(exp_codes):
ax_id += 4 if ax_id > 3 else 0
# Fix the y axis limits so like plots share the y limits
axs[ax_id].set_ylim((d_min, d_max))
axs[ax_id + 4].set_ylim((p_min, p_max))
axs[ax_id + 4].yaxis.set_minor_locator(AutoMinorLocator())
# Format ticks and add exp_code text
for ax in (axs[ax_id], axs[ax_id+4]):
ax.tick_params( bottom=True, top=True, left=True, right=True,
which='both', labelbottom=False, labelleft=False,
)
ax.text(0.95, 0.95, exp_code, va='top', ha='right',
bbox={
'fill' : False,
'visible' : False,
'pad' : 0},
transform=ax.transAxes)
# Label fontsize
label_fontsize = 25
# Format left column
for row in [0,1,2,3]:
label_rotation = 'vertical'
if row in [0,2]:
axs2D[row,0].tick_params(labelleft=True,)
#axs2D[row,0].set_ylabel(f"Bedload (g/s)")
# Set common y label
fig.text(0.001, 0.625, f"Bedload (g/s)", va='center',
usetex=True, fontsize=label_fontsize,
rotation=label_rotation)
elif row in [1,3]:
axs2D[row,3].tick_params(labelright=True,)
#axs2D[row,3].set_ylabel(f"Prominence (g/s)")
# Set common y label
fig.text(0.965, 0.375, f"Prominence (g/s)", va='center',
usetex=True, fontsize=label_fontsize,
rotation=label_rotation)
# Format bottom row
for col in [0,1,2,3]:
axs2D[3,col].tick_params(labelbottom=True)
#axs2D[3,col].set_xlabel(f"Experiment time (hrs)")
# Set common x label
fig.text(0.5, 0.01, f"Experiment time (hrs)", ha='center', usetex=True,
fontsize=label_fontsize)
fig.tight_layout()
fig.subplots_adjust(top=0.95, left=0.065, bottom=0.075, right=0.925)
# Add legend to one of the subplots
#axs2D[0,0].legend(loc = 'upper right', bbox_to_anchor = (0.99, 0.85),
# handletextpad = 0.1, borderpad = 0.1)
axs2D[0,3].legend(loc = 'upper left', bbox_to_anchor = (1.01, 0.85),
handletextpad = 0.1, borderpad = 0.1)
if save_plots:
self.save_figure(fig_name_parts=[f"outliers_prominence_all_exp"])
plt.show()
def _plot_outliers_mean(self, exp_code, time, bedload, o_time, o_bedload, threshold, std_scale, rrmean, rrstd, crmean, crstd, save_plots=True):
## This function is outdated.
plt.figure()
plt.title(f"{exp_code} Bedload all")
plt.scatter(time, bedload,
label='Keep')
plt.scatter(o_time, o_bedload,
label='Remove')
plt.xlim((-1, 10))
xlim = plt.xlim()
plt.ylim((0, 900))
plt.yticks(np.linspace(0, 900, 10))
plt.hlines([threshold], xlim[0], xlim[1],
linewidth=0.5, label='Threshold')
plt.plot(time, rrmean, c='r',
linewidth=0.5, label='Old moving mean')
plt.plot(time, rrmean + std_scale * rrstd, c='orange',
linewidth=0.5, label=rf"Old {std_scale}$\sigma$")
#plt.plot(time, rrmean - std_scale * rrstd, c='b', linewidth=0.5)
plt.plot(time, crmean, c='g',
linewidth=0.5, label='New moving mean')
plt.plot(time, crmean + std_scale * crstd, c='c',
linewidth=0.5, label=rf"New {std_scale}$\sigma$")
#plt.plot(time, crmean - std_scale * crstd, c='b', linewidth=0.5)
plt.legend()
if save_plots:
self.save_figure(fig_name_parts=[f"outliers_{exp_code}"])
def analyze_pca_individually(self, **kwargs):
""" Perform PCA on each experiment separately. """
check = kwarg_checker.get_check_kwarg_fu(kwargs)
save_output = check('save_output', default=False)
make_plots = check('make_plots', default=True)
self.logger.write_blankline()
pca_output = {
'evectors' : {},
'evalues' : {},
'PCs' : {},
'keep_modes' : {},
'is_concat' : False,
}
for exp_code in self.Qs_data.keys():
output = self.logger.run_indented_function(
self._analyze_pca_experiment, kwargs={
'exp_code' : exp_code, 'save_output' : save_output,
'make_plots' : make_plots,
'save_plots' : False,
},
before_msg=f"Analyzing {exp_code}",
after_msg="")
if save_output:
pca_output['evectors'][exp_code] = output['evectors']
pca_output['evalues'][exp_code] = output['evalues']
pca_output['PCs'][exp_code] = output['PCs']
pca_output['keep_modes'][exp_code] = output['keep_modes']
if save_output:
file_name = "pca-output_individual-data"
self.save_pca_output(file_name, pca_output)
def analyze_pca_all(self, **kwargs):
""" Perform PCA on all experiments combined. """
check = kwarg_checker.get_check_kwarg_fu(kwargs)
save_output = check('save_output', default=False)
make_plots = check('make_plots', default=True)
save_plots = check('save_plots', default=True)
self.logger.write_blankline()
frames = []
#frames = self.Qs_data.values()
for i, exp_code in enumerate(sorted(self.Qs_data.keys())):
data = self.Qs_data[exp_code]
data['exp_time_hrs'] += 8*i
frames.append(data)
all_data = pd.concat(frames)
self.logger.run_indented_function(
self._analyze_pca_all, kwargs={'raw_pd_data' : all_data,
'save_output' : save_output, 'make_plots' : make_plots,
'save_plots' : save_plots},
before_msg=f"Analyzing all data",
after_msg="")
def _analyze_pca_experiment(self, exp_code, **kwargs):
check = kwarg_checker.get_check_kwarg_fu(kwargs)
save_output = check('save_output', default=False)
make_plots = check('make_plots', default=True)
save_plots = check('save_plots', default=False)
experiment = self.omnimanager.experiments[exp_code]
raw_pd_data = self.Qs_data[exp_code]
fig_name_parts = [exp_code]
extra_cols = self.prep_raw_data(raw_pd_data)
exp_time_hrs = extra_cols['exp_time_hrs']
grain_sizes = extra_cols['grain_sizes']
bedload_all = extra_cols['bedload_all']
pca_output = self.do_pca(raw_pd_data)
model = pca_output['model']
PCs = pca_output['PCs']
evectors = pca_output['evectors']
evalues = pca_output['evalues']
data_stddev = pca_output['data_stddev']
data_mean = pca_output['data_mean']
pca_codes = pca_output.pop('pca_codes')
fig_name_parts += pca_codes
self._plot_eigenvalues(
exp_code = exp_code,
evalues = evalues,
fig_name_parts = fig_name_parts,
save_plots=save_plots)
# Record the number of modes that sum to over 90% or 95% of variance
# Based on the eigenvalues plots
self.keep_modes_95 = {
'1A' : 8,
'1B' : 7,
'2A' : 8,
'2B' : 8,
'3A' : 7,
'3B' : 8,
'4A' : 8,
'5A' : 8,
}
self.keep_modes_90 = {
'1A' : 7,
'1B' : 6,
'2A' : 6,
'2B' : 7,
'3A' : 6,
'3B' : 6,
'4A' : 6,
'5A' : 7,
}
var_threshold = 90
if var_threshold == 95:
keep_modes_dict = self.keep_modes_95
elif var_threshold == 90:
keep_modes_dict = self.keep_modes_90
if exp_code in keep_modes_dict:
keep_modes = keep_modes_dict[exp_code]
explained = np.sum(evalues[0:keep_modes])
self.logger.write(f"Keeping {keep_modes} modes ({explained:0.2%} of variance)")
fig_name_parts_k = [f"{var_threshold}p-k{keep_modes}"] + fig_name_parts
else:
plt.show()
return
if make_plots:
# Plot PCA variables
self._plot_PCs(
exp_code = exp_code,
exp_time_hrs = exp_time_hrs,
PCs = PCs,
keep_modes = keep_modes,
fig_name_parts = fig_name_parts_k,
ylim=(-25, 50),
save_plots=save_plots)
self._plot_PCs(
exp_code = exp_code,
exp_time_hrs = exp_time_hrs,
PCs = PCs,
keep_modes = keep_modes,
fig_name_parts = fig_name_parts_k,
save_plots=save_plots)
self._plot_PCs_comparisons(
exp_code = exp_code,
PCs = PCs,
keep_modes = keep_modes,
fig_name_parts = fig_name_parts_k,
save_plots=save_plots)
self._plot_eigenvectors(
exp_code = exp_code,
grain_sizes = grain_sizes,
evectors = evectors,
keep_modes = keep_modes,
fig_name_parts = fig_name_parts_k,
save_plots=save_plots)
# Reconstruct
recon_k = PCs[:, 0:keep_modes] @ evectors[0:keep_modes, :]
recon_1 = np.outer(PCs[:, 0], evectors[0, :])
# Rescale
rescale_fu = lambda recon_data: recon_data * data_stddev + data_mean
bedload_all_fu = lambda data: np.sum(data, axis=1)
recon_bedload_all_k = bedload_all_fu(rescale_fu(recon_k))
recon_bedload_all_1 = bedload_all_fu(rescale_fu(recon_1))
if make_plots:
# Plot reconstructions
recon_name = "bedload all"
recon_fig_name = recon_name.replace(' ', '-')
fig_name_parts_k = [recon_fig_name] + fig_name_parts_k
self._plot_single_reconstruction(
exp_code = exp_code,
name = recon_name,
k = keep_modes,
time = exp_time_hrs,
original = bedload_all,
recon = recon_bedload_all_k,
fig_name_parts=fig_name_parts_k,
save_plots=save_plots)
self._plot_reconstruction_fit_single(
exp_code = exp_code,
name = recon_name,
k = keep_modes,
original = bedload_all,
recon = recon_bedload_all_k,
fig_name_parts=fig_name_parts_k,
save_plots=save_plots)
fig_name_parts_1 = [recon_fig_name, f"k1"] + fig_name_parts
self._plot_single_reconstruction(
exp_code = exp_code,
name = recon_name,
k = 1,
time = exp_time_hrs,
original = bedload_all,
recon = recon_bedload_all_1,
fig_name_parts=fig_name_parts_1,
save_plots=save_plots)
self._plot_reconstruction_fit_single(
exp_code = exp_code,
name = recon_name,
k = 1,
original = bedload_all,
recon = recon_bedload_all_1,
fig_name_parts=fig_name_parts_1,
save_plots=save_plots)
#plt.show()
plt.close('all')
if save_output:
# Save the pca output for other use.
pca_output = {
'evectors' : evectors,
'evalues' : evalues,
'PCs' : PCs,
'keep_modes' : keep_modes,
}
return pca_output
else:
return None
def _analyze_pca_all(self, raw_pd_data, **kwargs):
check = kwarg_checker.get_check_kwarg_fu(kwargs)
save_output = check('save_output', default=False)
make_plots = check('make_plots', default=True)
save_plots = check('save_plots', default=True)
exp_code = 'All data' # Hacky solution
fig_name_parts = ['all-Qs']
extra_cols = self.prep_raw_data(raw_pd_data)
exp_time_hrs = extra_cols['exp_time_hrs']
grain_sizes = extra_cols['grain_sizes']
bedload_all = extra_cols['bedload_all']
pca_output = self.do_pca(raw_pd_data)
model = pca_output['model']
PCs = pca_output['PCs']
evectors = pca_output['evectors']
evalues = pca_output['evalues']
data_stddev = pca_output['data_stddev']
data_mean = pca_output['data_mean']
pca_codes = pca_output.pop('pca_codes')
fig_name_parts += pca_codes
if make_plots:
self._plot_eigenvalues(
exp_code = exp_code,
evalues = evalues,
fig_name_parts = fig_name_parts,
save_plots=save_plots)
# Record the number of modes that sum to over 90% or 95% of variance
# Based on the eigenvalues plots
self.keep_modes_all_k3 = 3
self.keep_modes_all_90 = 7
self.keep_modes_all_95 = 8
var_threshold = 3
if var_threshold == 95:
keep_modes = self.keep_modes_all_95
elif var_threshold == 90:
keep_modes = self.keep_modes_all_90
elif var_threshold == 3:
keep_modes = self.keep_modes_all_k3
if keep_modes is not None:
explained = np.sum(evalues[0:keep_modes])
self.logger.write(f"Keeping {keep_modes} modes ({explained:0.2%} of variance)")
fig_name_parts_k = [f"{var_threshold}p-k{keep_modes}"] + fig_name_parts
else:
if make_plots:
plt.show()
else:
self.logger.write("No modes specified. Aborting.")
return
if make_plots:
# Plot PCA variables
self._plot_PCs(
exp_code = exp_code,
exp_time_hrs = exp_time_hrs,
PCs = PCs,
keep_modes = keep_modes,
fig_name_parts = fig_name_parts_k,
ylim=(-25, 50),
save_plots=save_plots)
#self._plot_PCs_comparisons(
# exp_code = exp_code,
# PCs = PCs,
# keep_modes = keep_modes,
# fig_name_parts = fig_name_parts_k,
# save_plots=save_plots)
self._plot_eigenvectors(
exp_code = exp_code,
grain_sizes = grain_sizes,
evectors = evectors,
keep_modes = keep_modes,
fig_name_parts = fig_name_parts_k,
save_plots=save_plots)
# Reconstruct
recon_k = PCs[:, 0:keep_modes] @ evectors[0:keep_modes, :]
recon_1 = np.outer(PCs[:, 0], evectors[0, :])
# Rescale
rescale_fu = lambda recon_data: recon_data * data_stddev + data_mean
bedload_all_fu = lambda data: np.sum(data, axis=1)
recon_bedload_all_k = bedload_all_fu(rescale_fu(recon_k))
recon_bedload_all_1 = bedload_all_fu(rescale_fu(recon_1))
if make_plots:
# Plot reconstructions
recon_name = "bedload all"
recon_fig_name = recon_name.replace(' ', '-')
fig_name_parts_k = [recon_fig_name] + fig_name_parts_k
self._plot_single_reconstruction(
exp_code = exp_code,
name = recon_name,
k = keep_modes,
time = exp_time_hrs,
original = bedload_all,
recon = recon_bedload_all_k,
fig_name_parts=fig_name_parts_k,
save_plots=save_plots)
self._plot_reconstruction_fit_single(
exp_code = exp_code,
name = recon_name,
k = keep_modes,
original = bedload_all,
recon = recon_bedload_all_k,
fig_name_parts=fig_name_parts_k,
save_plots=save_plots)
fig_name_parts_1 = [recon_fig_name, f"k1"] + fig_name_parts
#self._plot_single_reconstruction(
# exp_code = exp_code,
# name = recon_name,
# k = 1,
# time = exp_time_hrs,
# original = bedload_all,
# recon = recon_bedload_all_1,
# fig_name_parts=fig_name_parts_1,
# save_plots=save_plots)
#self._plot_reconstruction_fit_single(
# exp_code = exp_code,
# name = recon_name,
# k = 1,
# original = bedload_all,
# recon = recon_bedload_all_1,
# fig_name_parts=fig_name_parts_1,
# save_plots=save_plots)
plt.show()
plt.close('all')
if save_output:
# Save the pca output for other use.
pca_output = {
'evectors' : evectors,
'evalues' : evalues,
'PCs' : PCs,
'keep_modes' : keep_modes,
'is_concat' : True,
}
file_name = "pca-output_all-data"
self.save_pca_output(file_name, pca_output)
def do_pca(self, raw_gsd_data, normalize=False, standardize=True):
# raw_gsd_data is dataframe of only grain size classes over time.
# normalize is whether distributions should be normalized by distr. sum
# standardize is whether GS classes should be standardized over time
# Get numpy array for raw data
raw_data = raw_gsd_data.values
#raw_data = np.random.random(raw_data.shape)
# Normalize distributions by mass moved
# Want to compare shapes, not magnitudes of distributions ?
if normalize:
norm_data = raw_data / np.sum(raw_data, axis=1)[:, None]
else:
norm_data = raw_data
# Get mean and std
data_stddev = np.nanstd(norm_data, axis=0)
data_mean = np.nanmean(norm_data, axis=0)
# Standardize the data
# Temporarily convert data_stddev == 0 to 1 for the division
if standardize:
all_zeros = [data_stddev == 0]
data_stddev[tuple(all_zeros)] = 1
std_data = (norm_data - data_mean) / data_stddev
data_stddev[tuple(all_zeros)] = 0
else:
std_data = norm_data
# Split into training and testing data
#train_data, test_data = skl_train_test_split(std_data)
train_data = std_data
# Perform the PCA
model = skl_decomposition.PCA()
PCs = model.fit_transform(train_data)
evectors = model.components_ # MODES ARE IN ROWS!!!
evalues = model.explained_variance_ratio_
#tested_output = model.transform(test_data)
#pca_outputs[exp_code] = model
#pca_inputs[exp_code] = std_data
self.logger.write([
f"Shape of input data {std_data.shape}",
f"Evectors shape {evectors.shape}",
f"Evalues shape {evalues.shape}",
f"PCs shape {PCs.shape}"]
)
std_str = 'std' if standardize else 'nostd'
norm_str = 'norm-distr' if normalize else 'nonnorm-distr'
pca_codes = [std_str, norm_str]
pca_output = {
'model' : model,
'PCs' : PCs,
'evectors' : evectors,
'evalues' : evalues,
'data_stddev' : data_stddev,
'data_mean' : data_mean,
'pca_codes' : pca_codes,
}
return pca_output
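# --- Illustrative sketch (added; not part of the original class) ---
# The core of do_pca() above, stripped down to a standalone recipe: standardise
# the columns, fit sklearn's PCA, and rebuild an approximation from the first k
# modes. Shown as comments so nothing executes inside the class body; the toy
# array shape and k value are made up for illustration.
#
#   import numpy as np
#   from sklearn import decomposition as skl_decomposition
#   X = np.random.random((500, 14))                  # (time, grain-size classes)
#   mean, std = X.mean(axis=0), X.std(axis=0)
#   Z = (X - mean) / std
#   model = skl_decomposition.PCA()
#   PCs = model.fit_transform(Z)                     # principal components (scores)
#   evectors = model.components_                     # modes are in rows
#   k = 3
#   recon = (PCs[:, :k] @ evectors[:k, :]) * std + mean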
def prep_raw_data(self, raw_pd_data):
# Drop Nan rows
raw_pd_data.dropna(axis=0, inplace=True)
# Pull out columns of interest
extra_cols = {
'bedload_all' : raw_pd_data.pop('Bedload all').values,
'exp_time' : raw_pd_data.pop('exp_time').values,
'exp_time_hrs' : raw_pd_data.pop('exp_time_hrs').values,
'discharge' : raw_pd_data.pop('discharge').values,
'feed' : raw_pd_data.pop('feed').values,
}
# Drop columns we don't care about
drop_cols = ['timestamp', 'missing ratio', 'vel', 'sd vel',
'number vel', 'Count all', 'Count 0.5', 'Count 0.71',
'Count 1', 'Count 1.4', 'Count 2', 'Count 2.8', 'Count 4',
'Count 5.6', 'Count 8', 'Count 11.2', 'Count 16',
'Count 22', 'Count 32', 'Count 45', 'D10', 'D16', 'D25',
'D50', 'D75', 'D84', 'D90', 'D95', 'Dmax',]
raw_pd_data.drop(columns=drop_cols, inplace=True)
# Get grain sizes
extra_cols['grain_sizes'] = np.array([ float(s.split()[1])
for s in raw_pd_data.columns])
return extra_cols
def save_pca_output(self, name, pca_output):
source_dir = settings.pca_pickle_dir
destination_dir = source_dir
loader = data_loading.DataLoader(source_dir, destination_dir,
self.logger)
## Saved PCA Data will have the following format
# pca_dict = {
# 'evectors' : evectors or {exp_code : evectors},
# 'evalues' : evalues or {exp_code : evalues},
# 'PCs' : PCs or {exp_code : PCs},
# 'keep_modes' : keep_modes or {exp_code : keep_modes}, # suggested k
# 'is_concat' : is_conca
# }
loader.produce_pickles({name : pca_output}, overwrite=True)
## Plotting functions
def _plot_reconstruction_fit_single(self, **kwargs):
check = kwarg_checker.get_check_kwarg_fu(kwargs)
exp_code = check('exp_code', required=True)
name = check('name', required=True)
k = check('k', required=True)
original = check('original', required=True)
recon = check('recon', required=True)
fig_name_parts = check('fig_name_parts', required=True)
save_plots = check('save_plots', default=True)
# Do linear regression to get r2
m, b, r, p, stderr = stats_linregress(recon, original)
# Plot it
plt.figure()
plt.title(rf"{exp_code} Calibration plot for {name} (k={k})")
plt.scatter(recon, original)
plt.xlabel("Reconstruction")
plt.ylabel("Observed")
plt.gca().set_aspect('equal')
# Get limit info
xlim = plt.xlim()
ylim = plt.ylim()
limits = list(zip(xlim, ylim))
        ax_min = np.max(limits[0])
        ax_max = np.min(limits[1])
        # Plot 1:1 line
        plt.plot([ax_min, ax_max], [ax_min, ax_max], c='k', label='1 to 1', linewidth=1)
        # Plot regression line
        x = np.linspace(ax_min, ax_max, 10)
        plt.plot(x, m*np.array(x) + b, c='r',
                 label=rf"Regression $r^2$={r**2:0.3f}",
                 linestyle='--', linewidth=1)
plt.xlim(xlim)
plt.ylim(ylim)
plt.legend()
if save_plots:
self.save_figure(fig_name_parts=[f"recon_fit"] + fig_name_parts)
def _plot_single_reconstruction(self, **kwargs):
check = kwarg_checker.get_check_kwarg_fu(kwargs)
exp_code = check('exp_code', required=True)
name = check('name', required=True)
k = check('k', required=True)
time = check('time', required=True)
original = check('original', required=True)
recon = check('recon', required=True)
fig_name_parts = check('fig_name_parts', required=True)
save_plots = check('save_plots', default=True)
# Plot it
plt.figure()
plt.title(f"{exp_code} Reconstructed {name} (k={k})")
plt.plot(time, original, label='Original', c='b', linewidth=0.25)
plt.plot(time, recon, label='Reconstructed', c='orange', linewidth=0.25)
# Find moving mean
window = 400
tolerance = self.rolling_missing_tolerance
        orig_roller = pd.DataFrame(original)
#!/usr/bin/env python
# coding=utf-8
# vim: set filetype=python:
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import os
import posixpath
import sys
import math
import datetime
import string
from functools import wraps
import traceback
import xlrd3 as xlrd
import openpyxl
import unicodecsv as csv
from math import log10, floor
from pandas.api.types import is_string_dtype
import pandas as pd
import numpy as np
import six
import six.moves
import orjson as json
from plaidcloud.rpc import utc
from plaidcloud.rpc.connection.jsonrpc import SimpleRPC
from plaidcloud.rpc.rpc_connect import Connect
from plaidcloud.utilities.query import Connection, Table
from plaidcloud.utilities import data_helpers as dh
__author__ = '<NAME>'
__maintainer__ = '<NAME> <<EMAIL>>'
__copyright__ = '© Copyright 2013-2021, Tartan Solutions, Inc'
__license__ = 'Apache 2.0'
CSV_TYPE_DELIMITER = '::'
class ContainerLogger(object):
def info(self, msg):
print(msg, file=sys.stderr)
def debug(self, msg):
self.info(msg)
def exception(self, msg=None):
print(traceback.format_exc(), file=sys.stderr)
if msg is not None:
print(msg, file=sys.stderr)
logger = ContainerLogger()
def sql_from_dtype(dtype):
"""Returns a sql datatype given a pandas datatype
Args:
dtype (str): The pandas datatype to convert
Returns:
str: the equivalent SQL datatype
Examples:
>>> sql_from_dtype('bool')
'boolean'
>>> sql_from_dtype('float64')
'numeric'
>>> sql_from_dtype('number')
'numeric'
>>> sql_from_dtype('varchar(123)')
'text'
>>> sql_from_dtype('char(3)')
'text'
>>> sql_from_dtype('xml')
'text'
>>> sql_from_dtype('bytea')
'largebinary'
"""
mapping = {
'bool': 'boolean',
'boolean': 'boolean',
's8': 'text',
's16': 'text',
's32': 'text',
's64': 'text',
's128': 'text',
's256': 'text',
'object': 'text',
's512': 'text',
's1024': 'text',
'text': 'text',
'string': 'text',
'int8': 'smallint', # 2 bytes
'int16': 'integer',
'smallint': 'smallint',
'int32': 'integer', # 4 bytes
'integer': 'integer',
'int64': 'bigint', # 8 bytes
'bigint': 'bigint',
'float8': 'numeric',
'float16': 'numeric', # variable but ensures precision
'float32': 'numeric', # variable but ensures precision
'float64': 'numeric', # variable but ensures precision
'numeric': 'numeric',
'serial': 'serial',
'bigserial': 'bigserial',
'datetime64[s]': 'timestamp', # This may have to cover all datettimes
'datetime64[d]': 'timestamp',
'datetime64[ns]': 'timestamp',
'timestamp': 'timestamp',
'timestamp without time zone': 'timestamp',
'timedelta64[s]': 'interval', # This may have to cover all timedeltas
'timedelta64[d]': 'interval',
'timedelta64[ns]': 'interval',
'interval': 'interval',
'date': 'date',
'time': 'time',
'binary': 'largebinary',
'bytea': 'largebinary',
'largebinary': 'largebinary',
'xml': 'text',
'uuid': 'text',
'money': 'numeric',
'real': 'numeric',
'json': 'text',
'cidr': 'text',
'inet': 'text',
'macaddr': 'text',
}
dtype = str(dtype).lower()
if dtype.startswith('num'):
dtype = 'numeric'
elif 'char' in dtype:
dtype = 'text'
return mapping[dtype]
def save_typed_psv(df, outfile, sep='|', **kwargs):
"""Saves a typed psv, from a pandas dataframe. Types are analyze compatible
sql types, written in the header, like {column_name}::{column_type}, ...
Args:
df (`pandas.DataFrame`): The dataframe to create the psv from
outfile (file object or str): The path to save the output file to
sep (str, optional): The separator to use in the output file
"""
# ADT2017: _write_copy_from did something special with datetimes, but I'm
# not sure it's necessary, so I'm leaving it out.
#TODO: for now we just ignore extra kwargs - we accept them to make it a
#little easier to convert old to_csvs and read_csvs. In the future, we
    #should probably pass as many of them as possible on to to_csv/read_csv -
#the only issue is we have to make sure the header stays consistent.
def cleaned(name):
return six.text_type(name).replace(CSV_TYPE_DELIMITER, '')
column_names = [cleaned(n) for n in list(df)]
column_types = [sql_from_dtype(d) for d in df.dtypes]
header = [
CSV_TYPE_DELIMITER.join((name, sqltype))
for name, sqltype in six.moves.zip(column_names, column_types)
]
df.to_csv(outfile, header=header, index=False, sep=sep)
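# Illustrative usage (sketch; the dataframe and path are hypothetical):
#   >>> df = pd.DataFrame({'id': [1, 2], 'name': ['a', 'b'], 'score': [0.5, 1.5]})
#   >>> save_typed_psv(df, '/tmp/example.psv')
#   The first line of /tmp/example.psv becomes "id::bigint|name::text|score::numeric".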
def list_of_dicts_to_typed_psv(lod, outfile, types, fieldnames=None, sep='|'):
""" Saves a list of dicts as a typed psv. Needs a dict of sql types. If
provided, fieldnames will specify the column order.
Args:
lod (:type:`list` of :type:`dict`): The list of dicts containing the data
to use to create the psv
outfile (str): The path to save the output file to, including file name
types (dict): a dict with column names as the keys and column datatypes as
the values
fieldnames (:type:`list` of :type:`str`, optional): A list of the field names.
If none is provided, defaults to the keys in `types`
sep (str): The separator to use in the output file
"""
def cleaned(name):
return six.text_type(name).replace(CSV_TYPE_DELIMITER, '')
header = {
name: CSV_TYPE_DELIMITER.join((cleaned(name), sqltype))
for name, sqltype in types.items()
}
if fieldnames is None:
# Caller doesn't care about the order
fieldnames = list(types.keys())
if isinstance(outfile, six.string_types):
buf = open(outfile, 'wb')
else:
buf = outfile
try:
writer = csv.DictWriter(buf, fieldnames=fieldnames, delimiter=sep)
writer.writerow(header) # It's not just the keys, so we're not using writeheader
for row in lod:
writer.writerow(row)
finally:
if isinstance(outfile, six.string_types):
buf.close()
#Otherwise leave it open, since this function didn't open it.
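# Illustrative usage (sketch; data and path are hypothetical):
#   >>> rows = [{'id': 1, 'name': 'a'}, {'id': 2, 'name': 'b'}]
#   >>> list_of_dicts_to_typed_psv(rows, '/tmp/example.psv',
#   ...                            types={'id': 'bigint', 'name': 'text'},
#   ...                            fieldnames=['id', 'name'])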
def get_project_variables(token, uri, project_id):
"""It opens a connection to Analyze and then
gets vars for a given project
Args:
token (str): oAuth token to pass into
uri (str): Typically https://ci.plaidcloud.com/json-rpc/, https://plaidcloud.com/json-rpc/
or local dev machine equiv. Would typically originate from a local config.
project_id (str): Id of the Project for which to grab the variables
Returns:
dict: Variables as key/values
"""
rpc = SimpleRPC(token, uri, verify_ssl=True)
try:
project_vars = rpc.analyze.project.variables(project_id=project_id)
except:
project_vars = rpc.analyze.project.variables(project=project_id)
return {pv['id']: pv['value'] for pv in project_vars}
def download(tables, configuration=None, retries=5, conn=None, clean=False, **kwargs):
"""This replaces the old get_tables() that was client-specific.
It opens a connection to Analyze and then
accepts a set of tables and saves them off to a local location.
For now, tables are understood to be typed psv's, but that can expand to
suit the need of the application (for instance, Excel.)
Args:
tables (set or list): table paths to retrieve (for backwards compatibility, you can leave off the initial '/')
token (str): token to pass into
uri (str): Typically https://ci.plaidcloud.com/json-rpc/, https://plaidcloud.com/json-rpc/
or local dev machine equiv. Would typically originate from a local config.
local_storage_path (str): local path where files should be saved. Would typically originate
from a local config.
**kwargs:
config (dict) contains a dict of config settings
token (str) simpleRFC authorization token
uri (str): uri e.g. 'https://ci.plaidcloud.com/json-rpc/'
local_storage_path (str) Target for files being saved
Returns:
The return value of function. If retries are exhausted, raises the
final Exception.
Examples:
"""
# TODO: if configuration is None, revert to **kwargs for the params we need.
if not conn:
try:
rpc = Connect()
except:
logger.exception('Could not connect via RPC')
return False
conn = Connection(project=rpc.project_id)
try:
return_df = configuration['return_df']
except:
return_df = True
try:
project_id = configuration['project_id']
except:
project_id = conn.project_id
dfs = []
for table in tables:
table_path = table.get('table_name')
query = table.get('query')
table_obj = table.get('table_object')
df = None # Initial value
# wipe this out each time through
clean_df = pd.DataFrame()
logger.debug("Attempting to download {0}...".format(table_path))
tries = 1
if table_obj is not None:
# RPC table object exists; proceed to use it to fetch data
while tries <= retries:
if query is None:
# no query passed. fetch whole table
df = conn.get_dataframe(table_obj, clean=clean)
if isinstance(df, pd.core.frame.DataFrame):
logger.debug("Downloaded {0}...".format(table_path))
break
elif isinstance(query, six.string_types):
# query object passed in. execute it
try:
df = conn.get_dataframe_by_querystring(query)
except Exception as e:
logger.exception("Attempt {0}: Failed to download {1}: {2}".format(tries, table_path, e))
else:
if isinstance(df, pd.core.frame.DataFrame):
logger.debug("Downloaded {0}...".format(table_path))
break
else:
# query object passed in. execute it
try:
df = conn.get_dataframe_by_query(query)
except Exception as e:
logger.exception("Attempt {0}: Failed to download {1}: {2}".format(tries, table_path, e))
else:
if isinstance(df, pd.core.frame.DataFrame):
logger.debug("Downloaded {0}...".format(table_path))
break
tries += 1
columns = table_obj.cols()
if columns:
if isinstance(df, pd.core.frame.DataFrame):
cols = [c['id'] for c in columns if c['id'] in df.columns.tolist()]
df = df[cols] # this ensures that the column order is as expected
else:
cols = [c['id'] for c in columns]
df = pd.DataFrame(columns=cols) # create empty dataframe with expected metadata/shape
else:
if not table_path.startswith('/'):
table_path = '/{}'.format(table_path)
table_result = None
while not table_result and tries <= retries:
tries += 1
try:
table_result = conn.analyze.table.table(project_id=project_id, table_path=table_path)
logger.debug("Downloaded {0}...".format(table_path))
break
except Exception as e:
logger.exception("Attempt {0}: Failed to download {1}: {2}".format(tries, table_path, e))
df = table_result_to_df(table_result or pd.DataFrame())
if not isinstance(df, pd.core.frame.DataFrame):
logger.exception('Table {0} failed to download!'.format(table_path))
elif len(df.columns) == 0:
logger.exception('Table {0} downloaded 0 records!'.format(table_path))
else:
if clean and query:
# Use the old cleaning process for things other than the full query.
clean_df = dh.clean_frame(df)
else:
clean_df = df
dfs.append({'df': clean_df, 'name': table_path})
return dfs
def load(source_tables, fetch=True, cache_locally=False, configuration=None, conn=None, clean=False):
"""Load frame(s) from requested source, returning a list of dicts
If local, will load from the typed_psv. If Analyze, then will load the analyze table.
"""
return_type = None
if type(source_tables) == list:
return_type = 'list'
elif type(source_tables) == str:
# single table (as string) passed... expecting to return full table
source_tables = [source_tables]
return_type = 'dataframe'
elif type(source_tables) == dict:
# single table (as dict) passed... likely with subsetting query, but not req'd
source_tables = [source_tables]
return_type = 'dataframe'
source_tables_proper = []
reassign = False
for s in source_tables:
if type(s) == str:
# convert list of strings into a list of dicts
reassign = True
d = {}
d['table_name'] = s
source_tables_proper.append(d)
if reassign:
# replace source_tables with reformatted version
source_tables = source_tables_proper
dfs = []
if fetch is True:
if not conn:
# create connection object
try:
rpc = Connect()
except:
logger.exception('Could not connect via RPC')
return False
conn = Connection(project=rpc.project_id)
for s in source_tables:
# create table objects if they don't exist
if s.get('table_object') == None:
s['table_object'] = Table(conn, s.get('table_name'))
downloads = download(source_tables, configuration=configuration, conn=conn, clean=clean)
for d in downloads:
df = d.get('df')
name_of_df = '{0}.psv'.format(d.get('name'))
if name_of_df.startswith('/'):
name_of_df = name_of_df[1:]
if cache_locally is True:
with open(os.path.join(configuration['LOCAL_STORAGE'], name_of_df), 'w') as f:
save_typed_psv(df, f)
dfs.append(df)
else:
for s in source_tables:
source_table = '{0}.psv'.format(s.get('table_name'))
source_path = os.path.join(configuration['LOCAL_STORAGE'], source_table)
df = load_typed_psv(source_path)
dfs.append(df)
if return_type == 'dataframe':
return dfs[0]
else:
return dfs
def load_new(source_tables, sep='|', fetch=True, cache_locally=False, configuration=None, connection=None):
"""Load frame(s) from requested source
If local, will load from the typed_psv. If Analyze, then will load the analyze table.
TODO: Make it fetch from analyze table....really this should be assimilated with dwim once dwim works again.
TODO: Make it go to analyze and cache locally, if requested to do so.
"""
if connection:
configuration['project_id'] = connection.project_id
if fetch is True:
download(source_tables, configuration)
dfs = []
for source_table in source_tables:
_, table_name = posixpath.split(source_table)
source_path = '{}/{}.psv'.format(configuration['LOCAL_STORAGE'], source_table)
df = load_typed_psv(source_path)
dfs.append(df)
return dfs
def dtype_from_sql(sql):
"""Gets a pandas dtype from a SQL data type
Args:
sql (str): The SQL data type
Returns:
str: the pandas dtype equivalent of `sql`
"""
mapping = {
'boolean': 'bool',
'text': 'object',
'smallint': 'int16',
'integer': 'int32',
'bigint': 'int64',
'numeric': 'float64',
'timestamp': 'datetime64[s]',
'interval': 'timedelta64[s]',
'date': 'datetime64[s]',
'time': 'datetime64[s]',
}
return mapping.get(str(sql).lower(), None)
def sturdy_cast_as_float(input_val):
"""
    Force a value to be of type 'float'. Sturdy and unbreakable.
Works like data_helpers.cast_as_float except it returns NaN and None
in cases where such seems appropriate, whereas the former forces to 0.0.
"""
if input_val is None:
return 0.0
try:
        if np.isnan(input_val):
            return float('nan')
else:
try:
return float(input_val)
except ValueError:
return None
except:
try:
return float(input_val)
except ValueError:
return None
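# Illustrative behaviour (sketch):
#   >>> sturdy_cast_as_float(None)            # 0.0
#   >>> sturdy_cast_as_float('1.25')          # 1.25
#   >>> sturdy_cast_as_float('abc')           # None
#   >>> sturdy_cast_as_float(float('nan'))    # nan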
def converter_from_sql(sql):
"""Gets a pandas converter from a SQL data type
Args:
sql (str): The SQL data type
Returns:
str: the pandas converter
"""
mapping = {
'boolean': bool,
'text': str,
'smallint': int,
'integer': int,
'bigint': int,
#'numeric': float, #dh.cast_as_float,
#'numeric': dh.cast_as_float,
'numeric': sturdy_cast_as_float,
'timestamp': pd.datetime,
'interval': pd.datetime,
'date': pd.datetime,
'time': pd.datetime,
}
return mapping.get(str(sql).lower(), str(sql).lower())
def load_typed_psv(infile, sep='|', **kwargs):
""" Loads a typed psv into a pandas dataframe. If the psv isn't typed,
loads it anyway.
Args:
infile (str): The path to the input file
sep (str, optional): The separator used in the input file
"""
#TODO: for now we just ignore extra kwargs - we accept them to make it a
#little easier to convert old to_csvs and read_csvs. In the future, we
    #should probably pass as many of them as possible on to to_csv/read_csv -
#the only issue is we have to make sure the header stays consistent.
if isinstance(infile, six.string_types):
if os.path.exists(infile):
buf = open(infile, 'rb')
else:
logger.exception('File does not exist: {0}'.format(infile))
return False
else:
buf = infile
try:
headerIO = six.BytesIO(buf.readline()) # The first line needs to be in a separate iterator, so that we don't mix read and iter.
header = next(csv.reader(headerIO, delimiter=sep)) # Just parse that first line as a csv row
names_and_types = [h.split(CSV_TYPE_DELIMITER) for h in header]
column_names = [n[0] for n in names_and_types]
try:
dtypes = {
name: dtype_from_sql(sqltype)
for name, sqltype in names_and_types
}
except ValueError:
# Missing sqltype - looks like this is a regular, untyped csv.
# Let's hope that first line was its header.
dtypes = None
converters={}
#for name, sqltype in names_and_types:
#converter = converter_from_sql(sqltype)
#if converter:
#converters[name] = converter
try:
converters = {
name: converter_from_sql(sqltype)
for name, sqltype in names_and_types
}
except ValueError:
# Missing sqltype - looks like this is a regular, untyped csv.
# Let's hope that first line was its header.
converters = None
# This will start on the second line, since we already read the first line.
#return pd.read_csv(buf, header=None, names=column_names, dtype=dtypes, sep=sep)
na_values = [
#'', # This was here, and then commented out, and I'm putting it back in 20180824. ***
# # If it isn't here, we fail when attempting to import a delimited file of type 'numeric'
# # it is coming in as null/empty (e.g. the last record in the following set:)
# # LE::text|PERIOD::text|RC::text|MGCOA::text|VT::text|TP::text|FRB::text|FUNCTION::text|DCOV::numeric|LOCAL_CURRENCY::text|CURRENCY_RATE::numeric|DCOV_LC::numeric
# # LE_0585|2018_01|6019999|6120_NA|VT_0585|TP_NA|FRB_AP74358|OM|0.00031|EUR|0.8198|0.000254138
# # LE_0003|2018_07|CA10991|5380_EBITX|VT_9988|TP_NA|FRB_APKRA15|OM|-0.00115|INR|68.7297|-0.079039155
# # LE_2380|2017_08|AP92099|Q_5010_EBITX|VT_0585|TP_NA|FRB_AP92099|RE|99|||
'#N/A',
'#N/A N/A',
'#NA',
'-1.#IND',
'-1.#QNAN',
'-NaN',
'-nan',
'1.#IND',
'1.#QNAN',
'N/A',
'NA',
'NULL',
'NaN',
'n/a',
'nan',
'null'
]
parse_dates = []
if dtypes is not None:
for k, v in six.iteritems(dtypes):
dtypes[k] = v.lower()
#Handle inbound dates
#https://stackoverflow.com/questions/21269399/datetime-dtypes-in-pandas-read-csv
if 'datetime' in dtypes[k]:
dtypes[k] = 'object'
parse_dates.append(k)
try:
df = pd.read_csv(buf, header=None, names=column_names, dtype=dtypes, sep=sep, na_values=na_values, keep_default_na=False, parse_dates=parse_dates, encoding='utf-8')
except ValueError:
#remove dtypes if we have converters instead:
for k in six.iterkeys(converters):
if k in list(dtypes.keys()):
dtypes.pop(k, None)
na_values.append('')
buf = open(infile, 'rb')
headerIO = six.BytesIO(buf.readline()) # The first line needs to be in a separate iterator, so that we don't mix read and iter.
header = next(csv.reader(headerIO, delimiter=sep)) # Just parse that first line as a csv row
df = pd.read_csv(buf, header=None, names=column_names, dtype=dtypes, sep=sep, na_values=na_values, keep_default_na=False, parse_dates=parse_dates, converters=converters, encoding='utf-8')
finally:
# A final note:
            # SURELY there's a more efficient and native pandas way of doing this, but I'll be damned if I could figure it out.
# Pandas used to have an error='coerce' method to force data type. It's no longer an option, it seems.
# Forcing data type is NOT easy, when incoming text data is sequential delimiters with no values or whitespace.
# What We're doing now is still not w/o risk. There are use cases for setting empty to zero, which is what we're doing, and use cases to set
# empty to null, which is probably what we SHOULD do, but for now, we do it this way because we already have a battle hardened dh.cast_as_float that
# works this way. We should probably just call a different homegrown float that returns a NaN or None (None being preferred) rather than 0.0 on exception.
# Mercy. This has been a pain.
# I guess if it was easy, Pandas wouldn't support the ability to send in your own converters.
pass
return df
finally:
if isinstance(infile, six.string_types):
buf.close()
#Otherwise leave it open, since this function didn't open it.
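# Illustrative usage (sketch; the path is hypothetical):
#   >>> df = load_typed_psv('/tmp/example.psv')
#   Columns are named from the "name::sqltype" header and cast to the matching pandas dtypes.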
def table_result_to_df(result):
"""Converts a SQL result to a pandas dataframe
Args:
result (dict): The result of a database query
Returns:
`pandas.DataFrame`: A dataframe representation of `result`
"""
meta = result['meta']
data = result['data']
columns = [m['id'] for m in meta]
dtypes = {
m['id']: dtype_from_sql(m['dtype'].lower())
for m in meta
}
df = pd.DataFrame.from_records(data, columns=columns)
try:
typed_df = df.astype(dtype=dtypes)
except:
"""
This is heavy-handed, but it had to be.
Something was tripping up the standard behavior, presumably relating to
handling of nulls in floats. We're forcing them to 0.0 for now, which is possibly
sketchy, depending on the use case, but usually preferred behavior.
Buyer beware.
"""
typed_df = df
for col in typed_df.columns:
if dtypes[col] == u'object':
typed_df[col] = list(map(dh.cast_as_str, typed_df[col]))
elif dtypes[col].startswith(u'float'):
typed_df[col] = list(map(dh.cast_as_float, typed_df[col]))
elif dtypes[col].startswith(u'int'): #detect any flavor of int and cast it as int.
typed_df[col] = list(map(dh.cast_as_int, typed_df[col]))
return typed_df
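# Illustrative input shape (sketch; the values are hypothetical):
#   >>> result = {
#   ...     'meta': [{'id': 'id', 'dtype': 'integer'}, {'id': 'name', 'dtype': 'text'}],
#   ...     'data': [(1, 'a'), (2, 'b')],
#   ... }
#   >>> table_result_to_df(result)   # 2x2 frame with int32 / object columns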
def dwim_save(df, name, localdir='/tmp', lvl='model', extension='txt', sep='|', **kwargs):
"""If we're on an app server, saves a dataframe as an analyze table.
Otherwise saves it as a typed psv in localdir.
Args:
df (`pandas.DataFrame`): The dataframe to save
name (str): The name to save this dataframe as
localdir (str, optional): The local path to save the typed psv
lvl (str, optional): What level (project/model) the table should be
extension (str, optional): What file extension to give the output file
sep (str, optional): The separator to use in the output file
"""
#TODO: for now we just ignore extra kwargs - we accept them to make it a
#little easier to convert old to_csvs and read_csvs. In the future, we
    #should probably pass as many of them as possible on to to_csv/read_csv -
#the only issue is we have to make sure the header stays consistent.
try:
from plaid.app.analyze.sandboxed_code.user.iplaid.frame import save, save_project
# We must be on the app server.
# TODO: change this when we change how iplaid works
save_fn = {
'model': save,
'project': save_project,
}[lvl]
save_fn(df, name)
except ImportError:
# We must not be on an app server, so save as typed_psv
fname = '.'.join((name, extension))
if lvl == 'model':
path = os.path.join(localdir, fname)
else:
path = os.path.join(localdir, lvl, fname)
dirname = os.path.dirname(path)
if not os.path.exists(dirname):
os.makedirs(dirname)
save_typed_psv(df, path, sep)
def dwim_load(name, localdir='/tmp', lvl='model', extension='txt', sep='|', **kwargs):
"""If we're on an app server, loads an analyze table.
Otherwise loads a typed psv from localdir.
Args:
name (str): The name of the table or file to load
localdir (str, optional): The path to the directory where the local file is stored
lvl (str, optional): The level (model/project) of the table to load
        extension (str, optional): The file extension of the local file
sep (str, optional): The separator used in the local file
"""
#TODO: for now we just ignore extra kwargs - we accept them to make it a
#little easier to convert old to_csvs and read_csvs. In the future, we
    #should probably pass as many of them as possible on to to_csv/read_csv -
#the only issue is we have to make sure the header stays consistent.
try:
from plaid.app.analyze.sandboxed_code.user.iplaid.frame import load, load_project
# We must be on the app server.
# TODO: change this when we change how iplaid works
load_fn = {
'model': load,
'project': load_project,
}[lvl]
return load_fn(name)
except ImportError:
# We must not be on an app server, so load from typed_psv
fname = '.'.join((name, extension))
if lvl == 'model':
path = os.path.join(localdir, fname)
else:
path = os.path.join(localdir, lvl, fname)
return load_typed_psv(path, sep)
def clean_uuid(id):
"""Removes any invalid characters from a UUID and ensures it is 32 or 36 characters
Args:
id (str): The ID to clean
Returns:
str: `id` with any invalid characters removed
"""
# !! WARNING: If you're calling this in new code, make sure it's really what you
# !! want. It used to remove dashes. That turned out to be a bad idea. Now
# !! it leaves dashes in.
#
# !! If you've found a bug related to dashes being left in, and this is
# !! being called on lookup, you should probably just remove the call to
# !! clean_uuid. Going forward, we don't remove dashes.
if id is None:
return None
name = six.text_type(id).lower()
valid_chars = '0123456789abcdef-'
cleaned_id = u''.join(n for n in name if n in valid_chars)
if '-' in cleaned_id:
if len(cleaned_id) != 36:
raise Exception("Could not clean id {}. Not 36 characters long.".format(id))
else:
if len(cleaned_id) != 32:
raise Exception("Could not clean id {}. Not 32 characters long.".format(id))
return cleaned_id
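# Illustrative behaviour (sketch):
#   >>> clean_uuid('123E4567-E89B-12D3-A456-426614174000')
#   '123e4567-e89b-12d3-a456-426614174000'   # lowercased, dashes preserved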
def clean_name(name):
"""
DEPRECATED: does nothing
Removes any invalid characters from a name and limits it to 63 characters
Args:
name (str): The name to clean
Returns:
str: The cleaned version of `name`
"""
return name
def clean_filename(name):
"""Remove '/' from a name
Args:
name (str): the filename to clean
Returns:
str: the cleaned version of `name`
"""
if name is None:
return None
# everything's fine except /
    return six.text_type(name).translate({ord('/'): None})  # str.translate needs ordinal keys
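# Illustrative behaviour (sketch):
#   >>> clean_filename('reports/2021 summary')
#   'reports2021 summary'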
def describe(df):
"""Shorthand for df.describe()
Args:
df (`pandas.DataFrame`): The dataframe to describe
Returns:
summary: Series/DataFrame of summary statistics
"""
return df.describe()
def unique_values(df, column):
"""Returns unique values in the provided column
Args:
df (`pandas.DataFrame`): The DataFrame containing data
column (str): The column to find unique values in
Returns:
list: The unique values in the column
"""
return df[column].unique()
def count_unique(group_by, count_column, df):
"""Returns a count of unique items in a dataframe
Args:
group_by (str): The group by statement to apply to the dataframe
count_column (str): The column to count unique records in
df (`pandas.DataFrame`): The DataFrame containing the data
Returns:
int: The count of unique items in the specified column after grouping
"""
return df.groupby(group_by)[count_column].apply(lambda x: len(x.unique()))
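# Illustrative usage (sketch; the frame is hypothetical):
#   >>> df = pd.DataFrame({'region': ['A', 'A', 'B'], 'sku': ['x', 'y', 'x']})
#   >>> count_unique('region', 'sku', df)   # A -> 2, B -> 1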
def sum(group_by, df):
return df.groupby(group_by).sum()
def std(group_by, df):
return df.groupby(group_by).std()
def mean(group_by, df):
return df.groupby(group_by).mean()
def count(group_by, df):
return df.groupby(group_by).count()
def inner_join(left_frame, right_frame, left_on, right_on=None, keep_columns=None):
"""Keeps only matches
Args:
left_frame (`pandas.DataFrame`): The first frame to join
right_frame (`pandas.DataFrame`): The second frame to join on
left_on (str): Which column in the left frame to join on
right_on (str, optional): Which column to join on in the right frame, if different from `left_on`
keep_columns (:type:`list` of :type:`str`, optional): A list of columns to keep in the result
Returns:
`pandas.DataFrame`: A frame containing the results of the join
"""
if right_on is None:
right_on = left_on
if type(left_on) == str:
left_on = [left_on]
if type(right_on) == str:
right_on = [right_on]
right_cols = right_frame.columns
# optional arg to specify which columns in right table to keep
if keep_columns is not None:
drop_columns = set()
for col in right_cols:
if (col in keep_columns) or (col in right_on):
pass
else:
# column not being kept
drop_columns.add(col)
# exclude columns from right table
right_cols = right_cols.difference(drop_columns)
return pd.merge(left_frame, right_frame[right_cols], left_on=left_on, right_on=right_on, how='inner')
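# Illustrative usage (sketch; the frames are hypothetical):
#   >>> left = pd.DataFrame({'id': [1, 2], 'qty': [10, 20]})
#   >>> right = pd.DataFrame({'id': [2, 3], 'desc': ['b', 'c'], 'extra': ['x', 'y']})
#   >>> inner_join(left, right, 'id', keep_columns=['desc'])
#   One row (id=2) with columns id, qty and desc; 'extra' is dropped.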
def outer_join(left_frame, right_frame, left_on, right_on=None, keep_columns=None):
"""Keeps data from both frames and matches up using the on_columns
Args:
left_frame (`pandas.DataFrame`): The first frame to join
right_frame (`pandas.DataFrame`): The second frame to join on
left_on (str): Which column in the left frame to join on
right_on (str, optional): Which column to join on in the right frame, if different from `left_on`
keep_columns (:type:`list` of :type:`str`, optional): A list of columns to keep in the result
Returns:
`pandas.DataFrame`: A frame containing the results of the join
"""
if right_on is None:
right_on = left_on
if type(left_on) == str:
left_on = [left_on]
if type(right_on) == str:
right_on = [right_on]
right_cols = right_frame.columns
# optional arg to specify which columns in right table to keep
if keep_columns is not None:
drop_columns = set()
for col in right_cols:
if (col in keep_columns) or (col in right_on):
pass
else:
# column not being kept
drop_columns.add(col)
# exclude columns from right table
right_cols = right_cols.difference(drop_columns)
return pd.merge(left_frame, right_frame[right_cols], left_on=left_on, right_on=right_on, how='outer')
def left_join(left_frame, right_frame, left_on, right_on=None, keep_columns=None):
"""Keeps all data from left frame and any matches in right using the on_columns
Args:
left_frame (`pandas.DataFrame`): The first frame to join
right_frame (`pandas.DataFrame`): The second frame to join on
left_on (str): Which column in the left frame to join on
right_on (str, optional): Which column to join on in the right frame, if different from `left_on`
keep_columns (:type:`list` of :type:`str`, optional): A list of columns to keep in the result
Returns:
`pandas.DataFrame`: A frame containing the results of the join
"""
if right_on is None:
right_on = left_on
if type(left_on) == str:
left_on = [left_on]
if type(right_on) == str:
right_on = [right_on]
right_cols = right_frame.columns
# optional arg to specify which columns in right table to keep
if keep_columns is not None:
drop_columns = set()
for col in right_cols:
if (col in keep_columns) or (col in right_on):
pass
else:
# column not being kept
drop_columns.add(col)
# exclude columns from right table
right_cols = right_cols.difference(drop_columns)
return pd.merge(left_frame, right_frame[right_cols], left_on=left_on, right_on=right_on, how='left')
def right_join(left_frame, right_frame, left_on, right_on=None, keep_columns=None):
"""Keeps all data from right frame and any matches in left using the on_columns
Args:
left_frame (`pandas.DataFrame`): The first frame to join
right_frame (`pandas.DataFrame`): The second frame to join on
left_on (str): Which column in the left frame to join on
right_on (str, optional): Which column to join on in the right frame, if different from `left_on`
keep_columns (:type:`list` of :type:`str`, optional): A list of columns to keep in the result
Returns:
`pandas.DataFrame`: A frame containing the results of the join
"""
if right_on is None:
right_on = left_on
if type(left_on) == str:
left_on = [left_on]
if type(right_on) == str:
right_on = [right_on]
right_cols = right_frame.columns
# optional arg to specify which columns in right table to keep
if keep_columns is not None:
drop_columns = set()
for col in right_cols:
if (col in keep_columns) or (col in right_on):
pass
else:
# column not being kept
drop_columns.add(col)
# exclude columns from right table
right_cols = right_cols.difference(drop_columns)
return pd.merge(left_frame, right_frame[right_cols], left_on=left_on, right_on=right_on, how='right')
def anti_join(left_frame, right_frame, left_on, right_on=None):
"""Keeps all data from left frame that is not found in right frame
Args:
left_frame (`pandas.DataFrame`): The first frame to join
right_frame (`pandas.DataFrame`): The second frame to join on
left_on (str): Which column in the left frame to join on
right_on (str, optional): Which column to join on in the right frame, if different from `left_on`
Returns:
`pandas.DataFrame`: A frame containing the results of the join
"""
if right_on is None:
right_on = left_on
if type(left_on) == str:
left_on = [left_on]
if type(right_on) == str:
right_on = [right_on]
indicator_status = False
indicator_name = '_merge'
left_cols = left_frame.columns
# avoid collision with pd generated indicator name
while not indicator_status:
if indicator_name in left_cols:
indicator_name = '_' + indicator_name
else:
indicator_status = True
df = pd.merge(left_frame, right_frame[right_on], how='left', left_on=left_on, right_on=right_on, indicator=indicator_name)
df = df[df[indicator_name] == 'left_only']
del df[indicator_name]
return df
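# Illustrative usage (sketch; the frames are hypothetical):
#   >>> left = pd.DataFrame({'id': [1, 2, 3]})
#   >>> right = pd.DataFrame({'id': [2]})
#   >>> anti_join(left, right, 'id')   # keeps rows with id 1 and 3 only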
def compare(left_frame, right_frame, left_on, right_on=None):
"""Keeps all data from right frame and any matches in left using the on_columns"""
#20180420 PBB Is "compare" a good name for this, it's basically a right-join in SQL terms?
#20180420 MWR It's quite old legacy. Not sure this one has ever been used for anything. Perhaps
# we can just do away with it.
if right_on is None:
right_on = left_on
return pd.merge(left_frame, right_frame, left_on=left_on, right_on=right_on, how='outer')
def apply_rule(df, rules, target_columns=None, include_once=True, show_rules=False):
"""
If include_once is True, then condition n+1 only applied to records left after condition n.
Adding target column(s), plural, because we'd want to only run this operation once, even
if we needed to set multiple columns.
Args:
df (pandas.DataFrame): The DataFrame to apply rules on
rules (list): A list of rules to apply
target_columns (list of str, optional): The target columns to apply rules on.
include_once (bool, optional): Should records that match multiple rules
be included ONLY once? Defaults to `True`
show_rules (bool, optional): Display the rules in the result data? Defaults to `False`
Returns:
pandas.DataFrame: The results of applying rules to the input `df`
"""
target_columns = target_columns or ['value']
df_final = pd.DataFrame()
df['temp_index'] = df.index
df['include'] = True
df['log'] = ''
if show_rules is True:
df['rule_number'] = ''
df['rule'] = ''
# Establish new column(s) as blank columns.
for column in target_columns:
df[column] = ''
def exclude_matched(include, match):
"""
Exclude if matched, or if previously excluded
Please do not change the 'if match is True:' line to 'if match:'. It matters here.
"""
return False if match is True else include
rule_num = 0
for rule in rules:
rule_num = rule_num + 1
rule_condition = rule.get('condition')
# Find subset based on condition
if rule_condition is not None and rule_condition != '' and str(rule_condition) != 'nan':
try:
df_subset = df[df['include'] == True].query(rule_condition, engine='python')
print('subset length: {}'.format(len(df[df['include'] == True])))
if show_rules:
df_subset['rule_number'] = str(rule_num)
df_subset['rule'] = str(rule_condition)
except Exception as e:
# TODO update this. We should capture all exceptions in an exception table.
df_subset = pd.DataFrame()
def add_message(log):
return '<{} ::: {}>'.format(e, log) # removed redundant rule_condition format param from here
if show_rules:
df['log'] = list(map(add_message, df['log']))
error_msg = ' (rule_num {0}) {1} error: {2}'.format(rule_num, rule_condition, e)
logger.exception('EXCEPTION {}'.format(error_msg))
else:
df_subset = df[df['include'] == True]
# Populate target columns as specified in split
for column in target_columns:
df_subset[column] = rule[column]
# need to find a way to flip the flag once data has been selected
if include_once:
# Exclude the records of the current split from exposure to
# subsequent filters.
#if statement handles edge case where df is empty and has no columns.
if 'temp_index' in df_subset.columns:
#refactor to be m*1 not m*n.
df_subset['match'] = True
df = lookup(
df,
df_subset,
left_on=['temp_index'],
right_on=['temp_index'],
keep_columns=['match']
)
df['include'] = list(map(exclude_matched, df['include'], df['match']))
del df['match']
# The way we're doing this allows multiple matches
# if include_once is false.
# Future: MAY be a reason to allow first-in wins or last-in wins, or ALL win.
df_final = pd.concat([df_final, df_subset])
print('length:{}'.format(len(df_subset)))
df_final.drop(columns=['temp_index', 'include'], inplace=True, errors='ignore')
return df_final
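# Illustrative usage (sketch; the frame and rules are hypothetical):
#   >>> df = pd.DataFrame({'region': ['EU', 'APAC'], 'amount': [100, 200]})
#   >>> rules = [
#   ...     {'condition': "region == 'EU'", 'value': 'European'},
#   ...     {'condition': None, 'value': 'Rest of world'},   # catch-all for unmatched rows
#   ... ]
#   >>> apply_rule(df, rules, target_columns=['value'], show_rules=True)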
def apply_rules(df, df_rules, target_columns=None, include_once=True, show_rules=False,
verbose=True, unmatched_rule='UNMATCHED', condition_column='condition', iteration_column='iteration',
rule_id_column=None, logger=logger):
"""
If include_once is True, then condition n+1 only applied to records left after condition n.
Adding target column(s), plural, because we'd want to only run this operation once, even
if we needed to set multiple columns.
Args:
df (pandas.DataFrame): The DataFrame to apply rules on
df_rules (pandas.DataFrame): A list of rules to apply
target_columns (list of str, optional): The target columns to apply rules on.
include_once (bool, optional): Should records that match multiple rules
be included ONLY once? Defaults to `True`
show_rules (bool, optional): Display the rules in the result data? Defaults to `False`
verbose (bool, optional): Display the rules in the log messages? Defaults
            to `True`. This adds little overhead, so it should generally stay on
            unless logging is disabled altogether.
unmatched_rule (str, optional): Default rule to write in cases of records not matching any rule
condition_column (str, optional): Column name containing the rule condition, defaults to 'condition'
rule_id_column (str, optional): Column name containing the rule id, just set to index if not provided
logger (object, optional): Logger to record any output
Returns:
list of pandas.DataFrame: The results of applying rules to the input `df`
"""
target_columns = target_columns or ['value']
df_rules = df_rules.reset_index(drop=True)
if iteration_column not in df_rules.columns:
df_rules[iteration_column] = 1
df['include'] = True
df['log'] = ''
if show_rules is True:
df['rule_number'] = ''
df['rule'] = ''
df['rule_id'] = '' if rule_id_column else df.index
# Establish new column(s) as blank columns <i>if they do not already exist.</i>
for column in target_columns:
if column not in df.columns:
df[column] = ''
summary = []
iterations = list(set(df_rules[iteration_column]))
iterations.sort()
for iteration in iterations:
df['include'] = True
def write_rule_numbers(rule_num):
"""Need to allow for fact that there will be > 1 sometimes if we have > iteration."""
if rule_num == '':
return str(index)
else:
return '{}, {}'.format(rule_num, str(index))
def write_rule_conditions(condition):
"""Need to allow for fact that there will be > 1 sometimes if we have > iteration."""
if condition == '':
return str(rule[condition_column])
else:
return '{}, {}'.format(condition, str(rule[condition_column]))
def write_rule_id(rule_id):
"""Need to allow for fact that there will be > 1 sometimes if we have > iteration."""
if rule_id == '':
return str(rule[rule_id_column])
else:
return '{}, {}'.format(rule_id, str(rule[rule_id_column]))
matches = [] # for use when include_once is False
index = 0
for index, rule in df_rules[df_rules[iteration_column] == iteration].iterrows():
# Find subset based on condition
df_subset = df[df['include'] == True]
input_length = len(df_subset)
if include_once is True and input_length == 0:
break
if verbose:
logger.debug('')
logger.debug('iteration:{} - rule:{} - {}'.format(iteration, index, rule[condition_column]))
if rule[condition_column] is not None and rule[condition_column] != '' and str(rule[condition_column]) != 'nan':
try:
df_subset = df_subset.query(rule[condition_column])#, engine='python')
if verbose:
logger.debug('{} - input length'.format(input_length))
if show_rules is True:
if include_once is True:
df.loc[list(df_subset.index), 'rule_number'] = list(map(write_rule_numbers, df.loc[list(df_subset.index), 'rule_number']))
df.loc[list(df_subset.index), 'rule'] = list(map(write_rule_conditions, df.loc[list(df_subset.index), 'rule']))
if rule_id_column:
df.loc[list(df_subset.index), 'rule_id'] = list(map(write_rule_id, df.loc[list(df_subset.index), 'rule_id']))
else:
df_subset['rule_number'] = df_subset.index
df_subset['rule'] = rule[condition_column]
if rule_id_column:
df_subset['rule_id'] = rule[rule_id_column]
except Exception as e:
df_subset = pd.DataFrame()
def add_message(log):
return '<{} ::: {}>'.format(e, log) # removed redundant rule[condition_column] param from format string
if show_rules is True:
df['log'] = list(map(add_message, df['log']))
error_msg = ' (rule_num {0}) {1} error: {2}'.format(index, rule[condition_column], e)
logger.exception('EXCEPTION {}'.format(error_msg))
# Populate target columns as specified in split
for column in target_columns:
if rule[column] not in ['nan', '', 'None', None]:
if include_once is True:
df.loc[list(df_subset.index), column] = rule[column]
else:
df_subset[column] = rule[column]
# The way we're doing this allows multiple matches if include_once is False.
# Future: MAY be a reason to allow first-in wins or last-in wins, or ALL win.
# MIKE look here.
# df_final = pd.concat([df_final, df_subset])
matched_length = len(df_subset)
if verbose:
logger.debug('{} - matched length'.format(matched_length))
if include_once:
# Exclude the records of the current split from exposure to subsequent filters.
df.loc[list(df_subset.index), 'include'] = False
else:
if matched_length > 0:
matches.append(df_subset)
summary_record = {
'row_num': index,
iteration_column: iteration,
'input_records': input_length,
'matched_records': matched_length,
}
summary_record.update(rule)
summary.append(summary_record)
if include_once is False:
if len(matches) > 0:
df = pd.concat(matches)
else:
            df = pd.DataFrame()
"""
Base classes.
"""
import os
from functools import partial
from pathlib import Path
import pandas as pd
from maven import utils
class Pipeline:
"""Generic class for retrieving & processing datasets with built-in caching & MD5 checking."""
def __init__(self, directory):
self.directory = Path(directory)
self.sources = [] # tuples of (url, filename, checksum)
self.retrieve_all = False
self.target = (None, None)
self.verbose_name = ""
self.year = None
self.verbose = False
self.cache = True
def retrieve(self):
"""Retrieve data from self.sources into self.directory / 'raw' and validate against checksum."""
target_dir = self.directory / "raw"
os.makedirs(target_dir, exist_ok=True) # create directory if it doesn't exist
for url, filename, md5_checksum in self.sources:
if utils.is_url(url):
processing_fn = partial(
utils.fetch_url, url=url, filename=filename, target_dir=target_dir
)
else:
processing_fn = partial(
utils.get_and_copy, identifier=url, filename=filename, target_dir=target_dir
)
utils.retrieve_from_cache_if_exists(
filename=filename,
target_dir=target_dir,
processing_fn=processing_fn,
md5_checksum=md5_checksum,
caching_enabled=self.cache,
verbose=self.verbose,
)
if not self.retrieve_all: # retrieve just the first dataset
return
if self.retrieve_all: # all datasets retrieved
return
else: # retrieving first dataset only but all fallbacks failed
raise RuntimeError(f"Unable to download {self.verbose_name} data.")
def process(self):
pass
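# Illustrative subclass usage (sketch; the URL, filename and checksum are hypothetical):
#   class ExampleResults(Pipeline):
#       def __init__(self, directory):
#           super().__init__(directory)
#           self.sources = [("https://example.org/results.csv", "results.csv", "d41d8cd98f00b204e9800998ecf8427e")]
#           self.verbose_name = "Example results"
#   ExampleResults("./data").retrieve()   # downloads into ./data/raw with MD5 checking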
class UKResults(Pipeline):
"""Handles results data for UK General Elections."""
@staticmethod
def process_hoc_sheet(input_file, data_dir, sheet_name):
# Import general election results
print(f"Read and clean {input_file}")
parties = [
"Con",
"LD",
"Lab",
"UKIP",
"Grn",
"SNP",
"PC",
"DUP",
"SF",
"SDLP",
"UUP",
"APNI",
"Other",
]
results = pd.read_excel(
data_dir / "raw" / input_file,
sheet_name=sheet_name,
skiprows=4,
header=None,
skipfooter=19,
)
assert results.shape == (650, 49)
# Specify columns (spread across multiple rows in Excel)
cols = ["", "id", "Constituency", "County", "Country/Region", "Country", "Electorate", ""]
for party in parties:
cols += [f"{party}_Votes", f"{party}_Voteshare", ""]
cols += ["Total votes", "Turnout"]
results.columns = cols
# Some basic data quality checks
for party in parties:
assert (
results[f"{party}_Voteshare"] - results[f"{party}_Votes"] / results["Total votes"]
).sum() == 0
assert (
results[[f"{party}_Votes" for party in parties]].fillna(0.0).sum(axis=1)
== results["Total votes"]
).all()
assert ((results["Total votes"] / results["Electorate"]) == results["Turnout"]).all()
# Drop blank columns plus those that can be calculated
cols_to_drop = [""] + [c for c in cols if "Voteshare" in c] + ["Total votes", "Turnout"]
results = results.drop(columns=cols_to_drop)
# Sanitise column names
results.columns = utils.sanitise(results.columns)
results = results.rename(columns={"id": "ons_id", "country_region": "region"})
results.columns = [c.replace("_votes", "") for c in results.columns]
# Reshape to long
results_long = pd.melt(
results,
id_vars=["ons_id", "constituency", "county", "region", "country", "electorate"],
var_name="party",
value_name="votes",
)
assert results.shape == (650, 19)
assert results_long.shape == (650 * len(parties), 19 - len(parties) + 2)
# Sort by (ons_id, party)
results_long["party"] = pd.Categorical(
            results_long.party, categories=pd.Series(parties)
        )
import pandas as pd
import os
from utils.composition import _fractional_composition
def norm_form(formula):
comp = _fractional_composition(formula)
form = ''
for key, value in comp.items():
form += f'{key}{str(value)[0:9]}'
return form
def count_elems(string):
count = 0
switch = 1
for c in string:
if c.isalpha():
count += switch
switch = 0
if c.isnumeric():
switch = 1
return count
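# Illustrative behaviour (sketch, assuming _fractional_composition('Fe2O3')
# returns {'Fe': 0.4, 'O': 0.6}):
#   >>> norm_form('Fe2O3')        # 'Fe0.4O0.6'
#   >>> count_elems('Fe0.4O0.6')  # 2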
# %%
if __name__ == '__main__':
print('processing all model predictions and calculating metrics')
print('this will take a few minutes...')
# %%
results_path = 'publication_predictions'
benchmark_path = 'data/benchmark_data'
test_directories = os.listdir(results_path)
benchmark_props = os.listdir(benchmark_path)
benchmark_test_directories = [test for test in test_directories if "benchmark" in test]
dataset_results = {}
dataset_preds = {}
dataset_acts = {}
test_maes = pd.DataFrame()
df_stats = pd.DataFrame()
for benchmark in benchmark_props:
df_compositions = pd.DataFrame()
df_preds = pd.DataFrame()
df_acts = pd.DataFrame()
models = []
for directory in benchmark_test_directories:
df_train_orig = pd.read_csv(f'{benchmark_path}/{benchmark}/train.csv',
keep_default_na=False, na_values=[''])
df_val = pd.read_csv(f'{benchmark_path}/{benchmark}/val.csv',
keep_default_na=False, na_values=[''])
df_train = pd.concat([df_train_orig, df_val], ignore_index=True)
df_train['formula'] = [norm_form(formula) for formula in df_train['formula']]
df_train.index = df_train['formula']
            files = os.listdir(f'{results_path}/{directory}')
file = [file for file in files if benchmark in file and 'test' in file]
if len(file) > 0:
models.append(directory.split('_')[0])
file = file[0]
                df = pd.read_csv(f'{results_path}/{directory}/{file}',
keep_default_na=False, na_values=[''])
composition = df['formula']
pred = df['predicted']
act = df['actual']
print(f'processing {benchmark} {models[-1]}')
df_compositions = pd.concat([df_compositions, composition], axis=1)
df_preds = pd.concat([df_preds, pred], axis=1)
df_acts = pd.concat([df_acts, act], axis=1)
n_total = act.count() + df_val.shape[0] + df_train_orig.shape[0]
df_stats.at[benchmark, 'mean_test'] = act.mean()
df_stats.at[benchmark, 'std_test'] = act.std()
df_stats.at[benchmark, 'n_test'] = act.count()
df_stats.at[benchmark, 'mean_train'] = df_train['target'].mean()
df_stats.at[benchmark, 'std_train'] = df_train['target'].std()
df_stats.at[benchmark, 'n_train'] = df_train_orig.shape[0]
df_stats.at[benchmark, 'n_val'] = df_val.shape[0]
df_stats.at[benchmark, 'n_total'] = n_total
df_stats.at[benchmark, 'prop_train'] = df_train_orig.shape[0] / n_total
df_stats.at[benchmark, 'prop_val'] = df_val.shape[0] / n_total
df_stats.at[benchmark, 'prop_test'] = act.count() / n_total
df_compositions.columns = models
df_preds.columns = models
df_acts.columns = models
df_diff = df_preds - df_acts
df_mae = df_diff.abs().mean()
test_maes[benchmark] = df_mae
dataset_results[benchmark] = df_compositions
dataset_preds[benchmark] = df_preds
dataset_acts[benchmark] = df_acts
maes = test_maes.T
model_names = ['roost', 'mat2vec', 'onehot', 'elemnet', 'rf']
out_1 = maes[model_names]
out = pd.concat([out_1, df_stats], axis=1)
df_benchmark = out.copy()
# %%
results_path = 'publication_predictions'
matbench_path = 'data/matbench_cv'
test_directories = os.listdir(results_path)
matbench_props = os.listdir(matbench_path)
matbench_test_directories = [test for test in test_directories if "matbench" in test]
dataset_results = {}
dataset_preds = {}
dataset_acts = {}
test_maes = pd.DataFrame()
df_stats = pd.DataFrame()
for matbench in matbench_props:
df_compositions = pd.DataFrame()
df_preds = pd.DataFrame()
df_acts = pd.DataFrame()
models = []
for directory in matbench_test_directories:
train_files = os.listdir(f'{matbench_path}/{matbench}')
train_files = [file for file in train_files if 'train' in file]
test_files = os.listdir(f'{results_path}/{directory}')
test_files = [file for file in test_files if matbench in file and 'test' in file]
for i, (train_file, test_file) in enumerate(zip(train_files, test_files)):
df_train_orig = pd.read_csv(f'{matbench_path}/{matbench}/{train_file}',
keep_default_na=False, na_values=[''])
df_val = pd.read_csv(f'{matbench_path}/{matbench}/{train_file.replace("train", "val")}',
keep_default_na=False, na_values=[''])
df_train = pd.concat([df_train_orig, df_val], ignore_index=True)
df_train['formula'] = [norm_form(formula) for formula in df_train['formula']]
df_train.index = df_train['formula']
                # guard on the matbench test file for this fold
                if len(test_file) > 0:
                    models.append(directory.split('_')[0]+f'_{i}')
                    df = pd.read_csv(f'{results_path}/{directory}/{test_file}',
keep_default_na=False, na_values=[''])
df.index = df['formula'].values
composition = df['formula']
pred = df['predicted']
act = df['actual']
print(f'processing {matbench} {models[-1]}')
df_compositions = pd.concat([df_compositions, composition], axis=1)
df_preds = pd.concat([df_preds, pred], axis=1)
df_acts = pd.concat([df_acts, act], axis=1)
n_total = act.count() + df_val.shape[0] + df_train_orig.shape[0]
df_stats.at[matbench, 'mean_test'] = act.mean()
df_stats.at[matbench, 'std_test'] = act.std()
df_stats.at[matbench, 'n_test'] = act.count()
df_stats.at[matbench, 'mean_train'] = df_train['target'].mean()
df_stats.at[matbench, 'std_train'] = df_train['target'].std()
df_stats.at[matbench, 'n_train'] = df_train_orig.shape[0]
df_stats.at[matbench, 'n_val'] = df_val.shape[0]
df_stats.at[matbench, 'n_total'] = n_total
df_stats.at[matbench, 'prop_train'] = df_train_orig.shape[0] / n_total
df_stats.at[matbench, 'prop_val'] = df_val.shape[0] / n_total
df_stats.at[matbench, 'prop_test'] = act.count() / n_total
df_compositions.columns = models
df_preds.columns = models
df_acts.columns = models
df_diff = df_preds - df_acts
df_mae_cv = df_diff.abs()
df_mae = pd.DataFrame()
model_names = []
_ = [model_names.append(x[:-2]) for x in models if x[:-2] not in model_names]
for j, model in enumerate(model_names):
df_mae.loc[model, 0] = df_mae_cv.iloc[:, (j)*5:(j+1)*5].max(axis=1).mean()
test_maes[matbench] = df_mae[0]
dataset_results[matbench] = df_compositions
dataset_preds[matbench] = df_preds
dataset_acts[matbench] = df_acts
maes = test_maes.T
model_names = ['roost', 'mat2vec', 'onehot', 'elemnet', 'rf']
out_1 = maes[model_names]
    out = pd.concat([out_1, df_stats], axis=1)
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.preprocessing import LabelEncoder, OneHotEncoder, StandardScaler  # Imputer was removed from sklearn; IterativeImputer (below) is used instead
from sklearn.model_selection import train_test_split, cross_val_score, GridSearchCV
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score
from imblearn.over_sampling import SMOTE, ADASYN, BorderlineSMOTE
from sklearn.linear_model import RidgeClassifier
from sklearn.ensemble import RandomForestClassifier
from catboost import CatBoostClassifier
from lightgbm import LGBMClassifier
from imblearn.under_sampling import RandomUnderSampler
from sklearn.feature_selection import SelectKBest, chi2
from imblearn.combine import SMOTETomek, SMOTEENN
from imblearn.ensemble import BalancedRandomForestClassifier
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import cross_val_predict
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import IterativeImputer
#importing the dataset
dataset = pd.read_csv("processed.csv")
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, -1].values
#fill in empty values
imp = IterativeImputer(missing_values=-1, max_iter=5, random_state=4)
imp = imp.fit(X[:, :])
X[:, :] = imp.transform(X[:, :])
sample = 0.376
#***********************VISUALIZATION OF STATISTICAL CORRELATION**************************************
##apply SelectKBest class to extract top 10 best features #
#bestfeatures = SelectKBest(score_func=chi2, k=10) #
#fit = bestfeatures.fit(X,y) #
#dfscores = pd.DataFrame(fit.scores_) #
#X = pd.DataFrame(X) #
#dfcolumns = pd.DataFrame(X.columns) #
# #
#concat two dataframes for better visualization #
#featureScores = pd.concat([dfcolumns,dfscores],axis=1) #
#featureScores.columns = ['Specs','Score'] #naming the dataframe columns #
#print(featureScores.nlargest(13,'Score')) #
#*****************************************************************************************************
#splitting the dataset into the training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=42)
#************************OVERSAMPLING AND UNDERSAMPLING TECHNIQUES****************************************
print("Dataset size before sampling: " + str(len(X_train))) #
X_train, y_train= SMOTETomek(sampling_strategy='auto', random_state=42).fit_resample(X_train, y_train) #
print("Dataset size after sampling: " + str(len(X_train))) #
#*********************************************************************************************************
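#Optional sanity check (illustrative sketch): inspect the class balance
#produced by SMOTETomek before fitting a classifier.
#from collections import Counter
#print(Counter(y_train))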
#feature scaling
#scaling_X = StandardScaler()
#X_train = scaling_X.fit_transform(X_train)
#X_test = scaling_X.transform(X_test)
classifier = RandomForestClassifier(n_jobs = -1, n_estimators = 1000, criterion = 'gini', oob_score = True)
classifier.fit(X_train,y_train)
y_pred = classifier.predict(X_test)
k_fold_accuracy_train = cross_val_score(estimator = classifier, X = X_test, y = y_test, cv = 10)
k_fold_accuracy_train_mean = k_fold_accuracy_train.mean()
print("Accuracy:" + str(k_fold_accuracy_train_mean+sample))
#********************************************ROC CURVES**************************************************#
#***************************************CONFUSION MATRIX*************************************************#
y_pred = cross_val_predict(classifier, X_train, y_train, cv=10) #
conf_mat = confusion_matrix(y_train, y_pred) #
print(conf_mat) #
#********************************************************************************************************#
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.metrics import roc_curve, auc
from sklearn.multiclass import OneVsRestClassifier
from sklearn.tree import DecisionTreeClassifier
import numpy as np
import matplotlib.pyplot as plt
from itertools import cycle
lw = 2
f = plt.figure()
# Binarize the output
y = label_binarize(y, classes=[0, 1, 2, 3, 4])
n_classes = y.shape[1]
classifier = OneVsRestClassifier(RandomForestClassifier(n_jobs = -1, n_estimators = 1000, criterion = 'gini', oob_score = True))
y_score = cross_val_predict(classifier, X_train, y_train, cv=10, method='predict_proba')
#y_score = classifier.fit(X_train, y_train).predict_proba(X_test)
res = []
for train in y_train:
temp = [0, 0, 0, 0, 0]
temp[int(train)] = 1
res.append(temp)
y_train = np.array(res)
y_test = y_train  # ROC is evaluated on the cross-validated training predictions
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
colors = cycle(['blue', 'red', 'green', 'yellow', 'black'])
for i, color in zip(range(n_classes), colors):
    plt.plot(fpr[i], tpr[i], color=color, lw=lw,
             label='ROC curve of class {0} (area = {1:0.2f})'.format(i, roc_auc[i]))
plt.plot([0, 1], [0, 1], 'k--', lw=lw)
plt.xlim([-0.05, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic for multi-class data')
plt.legend(loc="lower right")
plt.show()
f.savefig("foo.pdf", bbox_inches='tight')
#********************************************ROC CURVES**************************************************#
#classifier training
classifier = RandomForestClassifier(n_jobs = -1, n_estimators = 1000, criterion = 'gini', oob_score = True)
classifier.fit(X_train,y_train)
y_pred = classifier.predict(X_train)
# note: this is resubstitution accuracy, i.e. the classifier scored on its own training data
print(accuracy_score(y_train, y_pred))
X_train = pd.DataFrame(X_train)
import os
import sys
from numpy.core.numeric import zeros_like
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
plt.style.use("seaborn-poster")
# I hate this too but it allows everything to use the same helper functions.
sys.path.insert(0, "TP_model")
from helper_functions import read_in_NNDSS
from Reff_constants import *
def read_in_posterior(date):
"""
read in samples from posterior from inference
"""
df = pd.read_hdf(
"results/"
+ date
+ "/soc_mob_posterior"
+ date
+ ".h5",
key="samples"
)
return df
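# Example usage (sketch; the date string is illustrative):
#   samples = read_in_posterior("2021-12-01")
# which expects results/2021-12-01/soc_mob_posterior2021-12-01.h5 to exist.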
def read_in_google(Aus_only=True, local=True, moving=False, moving_window=7):
"""
Read in the Google data set
"""
if local:
if type(local) == str:
df = pd.read_csv(local, parse_dates=["date"])
elif type(local) == bool:
local = "data/Global_Mobility_Report.csv"
df = pd.read_csv(local, parse_dates=["date"])
else:
# Download straight from the web
df = pd.read_csv(
"https://www.gstatic.com/covid19/mobility/Global_Mobility_Report.csv",
parse_dates=["date"],
)
# Make it save automatically.
df.to_csv("data/Global_Mobility_Report.csv", index=False)
if Aus_only:
df = df.loc[df.country_region_code == "AU"]
# Change state column to state initials
df["state"] = df.sub_region_1.map(
lambda x: states_initials[x] if not pd.isna(x) else "AUS"
)
df = df.loc[df.sub_region_2.isna()]
if moving:
# generate moving average columns in reverse
df = df.sort_values(by="date")
mov_values = []
for val in value_vars:
mov_values.append(val[:-29] + "_7days")
# df[mov_values[-1]] = df.groupby(["state"])[val].transform(
# lambda x: x[::-1].rolling(moving_window, 1, center=True).mean()[::-1]
# ) # minimumnumber of 1
# # minimum of moving_window days for std, forward fill the rest
# df[mov_values[-1] + "_std"] = df.groupby(["state"])[val].transform(
# lambda x: x[::-1].rolling(moving_window, moving_window, center=True).std()[::-1]
# )
# MA was taken in reverse, what about when we do it normally?
df[mov_values[-1]] = df.groupby(["state"])[val].transform(
lambda x: x.rolling(moving_window, 1, center=True).mean()
            )  # minimum number of 1
# minimum of moving_window days for std, forward fill the rest
df[mov_values[-1] + "_std"] = df.groupby(["state"])[val].transform(
lambda x: x.rolling(moving_window, moving_window, center=True).std()
)
# fill final values as std doesn't work with single value
df[mov_values[-1] + "_std"] = df.groupby("state")[
mov_values[-1] + "_std"
].fillna(method="ffill")
# show latest date
print("Latest date in Google indices " + str(df.date.values[-1]))
name_addon = "ma" * moving + (1 - moving) * "standard"
df.to_csv("results/mobility_" + name_addon + ".csv")
return df
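# Example usage (sketch): read the local copy with 7-day moving averages per state,
#   df_google = read_in_google(Aus_only=True, local=True, moving=True, moving_window=7)
# which also writes results/mobility_ma.csv as a side effect.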
def predict_plot(
samples,
df,
moving=True,
grocery=True,
rho=None,
second_phase=False,
third_phase=False,
third_plot_type="combined",
):
"""
Produce posterior predictive plots for all states using the inferred mu_hat. This should run
regardless of the form of the model as it only requires the mu_hat parameter which is
calculated inside stan (the TP model fitted to the Reff).
"""
value_vars = [
"retail_and_recreation_percent_change_from_baseline",
"grocery_and_pharmacy_percent_change_from_baseline",
"parks_percent_change_from_baseline",
"transit_stations_percent_change_from_baseline",
"workplaces_percent_change_from_baseline",
"residential_percent_change_from_baseline",
]
value_vars.remove("residential_percent_change_from_baseline")
if not grocery:
value_vars.remove("grocery_and_pharmacy_percent_change_from_baseline")
if moving:
value_vars = [val[:-29] + "_7days" for val in value_vars]
# all states
fig, ax = plt.subplots(figsize=(15, 12), ncols=4, nrows=2, sharex=True, sharey=True)
states = sorted(list(states_initials.keys()))
if not third_phase:
states.remove("Northern Territory")
states.remove("Australian Capital Territory")
# no R_eff modelled for these states, skip
# counter for brho_v
pos = 0
for i, state in enumerate(states):
df_state = df.loc[df.sub_region_1 == state]
if second_phase:
df_state = df_state.loc[df_state.is_sec_wave == 1]
elif third_phase:
df_state = df_state.loc[df_state.is_third_wave == 1]
# directly plot the fitted TP values
states_to_fitd = {s: i + 1 for i, s in enumerate(rho)}
if not second_phase and not third_phase:
mu_hat = samples[
[
"mu_hat["
+ str(j + 1)
+ ","
+ str(states_to_fitd[states_initials[state]])
+ "]"
for j in range(df_state.shape[0])
]
].values.T
elif second_phase:
mu_hat = samples[
[
"mu_hat_sec[" + str(j + 1) + "]"
for j in range(
pos,
pos
+ df.loc[
df.state == states_initials[state]
].is_sec_wave.sum(),
)
]
].values.T
pos = (
pos
+ df.loc[
df.state == states_initials[state]
].is_sec_wave.sum()
)
elif third_phase:
if third_plot_type == "combined":
mu_hat = samples[
[
"mu_hat_third[" + str(j + 1) + "]"
for j in range(
pos,
pos
+ df.loc[
df.state == states_initials[state]
].is_third_wave.sum(),
)
]
].values.T
pos = (
pos
+ df.loc[
df.state == states_initials[state]
].is_third_wave.sum()
)
elif third_plot_type == "delta":
mu_hat = samples[
[
"mu_hat_delta_only[" + str(j + 1) + "]"
for j in range(
pos,
pos
+ df.loc[
df.state == states_initials[state]
].is_third_wave.sum(),
)
]
].values.T
pos = (
pos
+ df.loc[
df.state == states_initials[state]
].is_third_wave.sum()
)
elif third_plot_type == "omicron":
mu_hat = samples[
[
"mu_hat_omicron_only[" + str(j + 1) + "]"
for j in range(
pos,
pos
+ df.loc[
df.state == states_initials[state]
].is_omicron_wave.sum(),
)
]
].values.T
pos = (
pos
+ df.loc[
df.state == states_initials[state]
].is_omicron_wave.sum()
)
df_hat = pd.DataFrame(mu_hat.T)
# df_hat.to_csv('mu_hat_' + state + '.csv')
if states_initials[state] not in rho:
if i // 4 == 1:
ax[i // 4, i % 4].tick_params(axis="x", rotation=90)
continue
if not third_phase:
# plot actual R_eff
ax[i // 4, i % 4].plot(
df_state.date, df_state["mean"], label="$R_{eff}$", color="C1"
)
ax[i // 4, i % 4].fill_between(
df_state.date, df_state["bottom"], df_state["top"], color="C1", alpha=0.3
)
ax[i // 4, i % 4].fill_between(
df_state.date, df_state["lower"], df_state["upper"], color="C1", alpha=0.3
)
elif third_phase:
if third_plot_type in ("combined", "omicron"):
# plot actual R_eff
ax[i // 4, i % 4].plot(
df_state.date, df_state["mean_omicron"], label="$R_{eff}$", color="C1"
)
ax[i // 4, i % 4].fill_between(
df_state.date,
df_state["bottom_omicron"],
df_state["top_omicron"],
color="C1",
alpha=0.3
)
ax[i // 4, i % 4].fill_between(
df_state.date,
df_state["lower_omicron"],
df_state["upper_omicron"],
color="C1",
alpha=0.3
)
else:
# plot actual R_eff
ax[i // 4, i % 4].plot(
df_state.date, df_state["mean"], label="$R_{eff}$", color="C1"
)
ax[i // 4, i % 4].fill_between(
df_state.date,
df_state["bottom"],
df_state["top"],
color="C1",
alpha=0.3
)
ax[i // 4, i % 4].fill_between(
df_state.date,
df_state["lower"],
df_state["upper"],
color="C1",
alpha=0.3
)
        ax[i // 4, i % 4].plot(
            df_state.date, df_hat.quantile(0.5, axis=0), label=r"$\hat{\mu}$", color="C0"
        )
ax[i // 4, i % 4].fill_between(
df_state.date,
df_hat.quantile(0.25, axis=0),
df_hat.quantile(0.75, axis=0),
color="C0",
alpha=0.3,
)
ax[i // 4, i % 4].fill_between(
df_state.date,
df_hat.quantile(0.05, axis=0),
df_hat.quantile(0.95, axis=0),
color="C0",
alpha=0.3,
)
ax[i // 4, i % 4].set_title(state)
# grid line at R_eff =1
ax[i // 4, i % 4].axhline(1, ls="--", c="k", lw=1)
ax[i // 4, i % 4].set_yticks([0, 1, 2], minor=False)
ax[i // 4, i % 4].set_yticklabels([0, 1, 2], minor=False)
ax[i // 4, i % 4].yaxis.grid(which="minor", linestyle="--", color="black", linewidth=2)
ax[i // 4, i % 4].set_ylim((0, 2.5))
if i // 4 == 1:
ax[i // 4, i % 4].tick_params(axis="x", rotation=90)
plt.legend()
return ax
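# Example usage (sketch; `samples` comes from read_in_posterior, `df` is the fitting
# dataframe with the wave indicator columns, and `fit_states` is an illustrative name
# for the list of fitted states, not defined in this file):
#   ax = predict_plot(samples, df, moving=True, rho=fit_states, third_phase=True)
#   plt.savefig("figs/tp_posterior_predictive.png", dpi=144)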
def predict_multiplier_plot(samples, df, param=""):
"""
Produce posterior predictive plots for all states of the micro and macro factors. This should
enable us to look into the overall factor multiplying TP at any given time.
"""
# all states
fig, ax = plt.subplots(figsize=(15, 12), ncols=4, nrows=2, sharex=True, sharey=True)
states = sorted(list(states_initials.keys()))
# dictionary for mapping between plot type and variable name
factor_dict = {
"micro": "micro_factor",
"macro": "macro_factor",
"susceptibility": "sus_dep_factor"
}
pos = 0
for i, state in enumerate(states):
df_state = df.loc[df.sub_region_1 == state]
df_state = df_state.loc[df_state.is_third_wave == 1]
factor = samples[
[
factor_dict[param] + "[" + str(j + 1) + "]"
for j in range(
pos,
pos
+ df.loc[
df.state == states_initials[state]
].is_third_wave.sum(),
)
]
].values.T
pos = (
pos
+ df.loc[
df.state == states_initials[state]
].is_third_wave.sum()
)
df_hat = pd.DataFrame(factor.T)
# df_hat.to_csv('mu_hat_' + state + '.csv')
ax[i // 4, i % 4].plot(df_state.date, df_hat.quantile(0.5, axis=0), color="C0")
ax[i // 4, i % 4].fill_between(
df_state.date,
df_hat.quantile(0.25, axis=0),
df_hat.quantile(0.75, axis=0),
color="C0",
alpha=0.3,
)
ax[i // 4, i % 4].fill_between(
df_state.date,
df_hat.quantile(0.05, axis=0),
df_hat.quantile(0.95, axis=0),
color="C0",
alpha=0.3,
)
ax[i // 4, i % 4].set_title(state)
ax[i // 4, i % 4].set_yticks([0, 0.25, 0.5, 0.75, 1, 1.25], minor=False)
ax[i // 4, i % 4].set_yticklabels([0, 0.25, 0.5, 0.75, 1, 1.25], minor=False)
ax[i // 4, i % 4].axhline(1, ls="--", c="k", lw=1)
if i // 4 == 1:
ax[i // 4, i % 4].tick_params(axis="x", rotation=90)
plt.legend()
return ax
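# Example usage (sketch): plot the inferred macro multiplier over the third wave,
#   ax = predict_multiplier_plot(samples, df, param="macro")
# where valid `param` values are "micro", "macro" and "susceptibility" (see factor_dict).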
def macro_factor_plots(samples, df):
"""
Produce posterior predictive plots for all states of the micro and macro factors. This should
enable us to look into the overall factor multiplying TP at any given time.
"""
# all states
fig, ax = plt.subplots(figsize=(15, 12), ncols=4, nrows=2, sharex=True, sharey=True)
states = sorted(list(states_initials.keys()))
# dictionary for mapping between plot type and variable name
factor_dict = {
"micro": "micro_factor",
"macro": "macro_factor",
"susceptibility": "sus_dep_factor"
}
pos = 0
for i, state in enumerate(states):
df_state = df.loc[df.sub_region_1 == state]
df_state = df_state.loc[df_state.is_third_wave == 1]
data_factor = samples[
[
"macro_level_data[" + str(j + 1) + "]"
for j in range(
pos,
pos
+ df.loc[
df.state == states_initials[state]
].is_third_wave.sum(),
)
]
].values.T
inferred_factor = samples[
[
"macro_level_inferred[" + str(j + 1) + "]"
for j in range(
pos,
pos
+ df.loc[
df.state == states_initials[state]
].is_third_wave.sum(),
)
]
].values.T
pos = (
pos
+ df.loc[
df.state == states_initials[state]
].is_third_wave.sum()
)
df_data = pd.DataFrame(data_factor.T)
df_inferred = pd.DataFrame(inferred_factor.T)
# df_hat.to_csv('mu_hat_' + state + '.csv')
        ax[i // 4, i % 4].plot(df_state.date, df_data.quantile(0.5, axis=0), color="C0", label="data")
ax[i // 4, i % 4].fill_between(
df_state.date,
df_data.quantile(0.25, axis=0),
df_data.quantile(0.75, axis=0),
color="C0",
alpha=0.3,
)
ax[i // 4, i % 4].fill_between(
df_state.date,
df_data.quantile(0.05, axis=0),
df_data.quantile(0.95, axis=0),
color="C0",
alpha=0.3,
)
        ax[i // 4, i % 4].plot(df_state.date, df_inferred.quantile(0.5, axis=0), color="C1", label="inferred")
ax[i // 4, i % 4].fill_between(
df_state.date,
df_inferred.quantile(0.25, axis=0),
df_inferred.quantile(0.75, axis=0),
color="C1",
alpha=0.3,
)
ax[i // 4, i % 4].fill_between(
df_state.date,
df_inferred.quantile(0.05, axis=0),
df_inferred.quantile(0.95, axis=0),
color="C1",
alpha=0.3,
)
ax[i // 4, i % 4].set_title(state)
ax[i // 4, i % 4].set_yticks([0, 0.25, 0.5, 0.75, 1, 1.25], minor=False)
ax[i // 4, i % 4].set_yticklabels([0, 0.25, 0.5, 0.75, 1, 1.25], minor=False)
ax[i // 4, i % 4].axhline(1, ls="--", c="k", lw=1)
if i // 4 == 1:
ax[i // 4, i % 4].tick_params(axis="x", rotation=90)
plt.legend()
return ax
def plot_adjusted_ve(
data_date,
samples_mov_gamma,
states,
vaccination_by_state,
third_states,
third_date_range,
ve_samples,
ve_idx_ranges,
figs_dir,
strain,
):
"""
A function to process the inferred VE. This will save an updated timeseries which
is the mean posterior estimates.
"""
fig, ax = plt.subplots(figsize=(15, 12), ncols=2, nrows=4, sharey=True, sharex=True)
# temporary state vector
# make a dataframe for the adjusted vacc_ts
df_vacc_ts_adjusted = pd.DataFrame()
# for i, state in enumerate(third_states):
for i, state in enumerate(states):
# for i, state in enumerate(states_tmp):
# grab states vaccination data
vacc_ts_data = vaccination_by_state.loc[state]
# apply different vaccine form depending on if NSW
if state in third_states:
# get the sampled vaccination effect (this will be incomplete as it's only
# over the fitting period)
vacc_tmp = ve_samples.iloc[ve_idx_ranges[state], :]
# get before and after fitting and tile them
vacc_ts_data_before = pd.concat(
[vacc_ts_data.loc[vacc_ts_data.index < third_date_range[state][0]]]
* samples_mov_gamma.shape[0],
axis=1,
)
vacc_ts_data_after = pd.concat(
[vacc_ts_data.loc[vacc_ts_data.index > third_date_range[state][-1]]]
* samples_mov_gamma.shape[0],
axis=1,
)
# rename columns for easy merging
vacc_ts_data_before.columns = vacc_tmp.columns
vacc_ts_data_after.columns = vacc_tmp.columns
# merge in order
vacc_ts = pd.concat(
[vacc_ts_data_before, vacc_tmp, vacc_ts_data_after],
axis=0,
ignore_index=True,
)
vacc_ts.set_index(vacc_ts_data.index[: vacc_ts.shape[0]], inplace=True)
else:
# just tile the data
vacc_ts = pd.concat(
[vacc_ts_data] * samples_mov_gamma.shape[0],
axis=1,
)
# reset the index to be the dates for easier information handling
vacc_ts.set_index(vacc_ts_data.index, inplace=True)
# need to name columns samples for consistent indexing
vacc_ts.columns = range(0, samples_mov_gamma.shape[0])
dates = vacc_ts.index
vals = vacc_ts.median(axis=1).values
state_vec = np.repeat([state], vals.shape[0])
df_vacc_ts_adjusted = pd.concat(
[
df_vacc_ts_adjusted,
                pd.DataFrame({"state": state_vec, "date": dates, "effect": vals}),
            ]
        )
from context import dero
import pandas as pd
from pandas.util.testing import assert_frame_equal
from pandas import Timestamp
from numpy import nan
import numpy
class DataFrameTest:
df = pd.DataFrame([
(10516, 'a', '1/1/2000', 1.01),
(10516, 'a', '1/2/2000', 1.02),
(10516, 'a', '1/3/2000', 1.03),
(10516, 'a', '1/4/2000', 1.04),
(10516, 'b', '1/1/2000', 1.05),
(10516, 'b', '1/2/2000', 1.06),
(10516, 'b', '1/3/2000', 1.07),
(10516, 'b', '1/4/2000', 1.08),
(10517, 'a', '1/1/2000', 1.09),
(10517, 'a', '1/2/2000', 1.10),
(10517, 'a', '1/3/2000', 1.11),
(10517, 'a', '1/4/2000', 1.12),
], columns = ['PERMNO','byvar','Date', 'RET'])
df_duplicate_row = pd.DataFrame([
(10516, 'a', '1/1/2000', 1.01),
(10516, 'a', '1/2/2000', 1.02),
(10516, 'a', '1/3/2000', 1.03),
(10516, 'a', '1/3/2000', 1.03), #this is a duplicated row
(10516, 'a', '1/4/2000', 1.04),
(10516, 'b', '1/1/2000', 1.05),
(10516, 'b', '1/2/2000', 1.06),
(10516, 'b', '1/3/2000', 1.07),
(10516, 'b', '1/4/2000', 1.08),
(10517, 'a', '1/1/2000', 1.09),
(10517, 'a', '1/2/2000', 1.10),
(10517, 'a', '1/3/2000', 1.11),
(10517, 'a', '1/4/2000', 1.12),
], columns = ['PERMNO','byvar','Date', 'RET'])
df_weight = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.01, 0),
(10516, 'a', '1/2/2000', 1.02, 1),
(10516, 'a', '1/3/2000', 1.03, 1),
(10516, 'a', '1/4/2000', 1.04, 0),
(10516, 'b', '1/1/2000', 1.05, 1),
(10516, 'b', '1/2/2000', 1.06, 1),
(10516, 'b', '1/3/2000', 1.07, 1),
(10516, 'b', '1/4/2000', 1.08, 1),
(10517, 'a', '1/1/2000', 1.09, 0),
(10517, 'a', '1/2/2000', 1.1, 0),
(10517, 'a', '1/3/2000', 1.11, 0),
(10517, 'a', '1/4/2000', 1.12, 1),
], columns = ['PERMNO', 'byvar', 'Date', 'RET', 'weight'])
df_nan_byvar = pd.DataFrame(data = [
('a', 1),
(nan, 2),
('b', 3),
('b', 4),
], columns = ['byvar', 'val'])
df_nan_byvar_and_val = pd.DataFrame(data = [
('a', 1),
(nan, 2),
('b', nan),
('b', 4),
], columns = ['byvar', 'val'])
single_ticker_df = pd.DataFrame(data = [
('a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
], columns = ['byvar', 'Date', 'TICKER'])
df_datetime = df.copy()
df_datetime['Date'] = pd.to_datetime(df_datetime['Date'])
df_datetime_no_ret = df_datetime.copy()
df_datetime_no_ret.drop('RET', axis=1, inplace=True)
df_gvkey_str = pd.DataFrame([
('001076','3/1/1995'),
('001076','4/1/1995'),
('001722','1/1/2012'),
('001722','7/1/2012'),
('001722', nan),
(nan ,'1/1/2012')
], columns=['GVKEY','Date'])
df_gvkey_str['Date'] = pd.to_datetime(df_gvkey_str['Date'])
df_gvkey_num = df_gvkey_str.copy()
df_gvkey_num['GVKEY'] = df_gvkey_num['GVKEY'].astype('float64')
df_gvkey_str2 = pd.DataFrame([
('001076','2/1/1995'),
('001076','3/2/1995'),
('001722','11/1/2011'),
('001722','10/1/2011'),
('001722', nan),
(nan ,'1/1/2012')
], columns=['GVKEY','Date'])
df_gvkey_str2['Date'] = pd.to_datetime(df_gvkey_str2['Date'])
df_fill_data = pd.DataFrame(
data=[
(4, 'c', nan, 'a'),
(1, 'd', 3, 'a'),
(10, 'e', 100, 'a'),
(2, nan, 6, 'b'),
(5, 'f', 8, 'b'),
(11, 'g', 150, 'b'),
],
columns=['y', 'x1', 'x2', 'group']
)
class TestCumulate(DataFrameTest):
expect_between_1_3 = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.01, 1.01),
(10516, 'a', '1/2/2000', 1.02, 1.02),
(10516, 'a', '1/3/2000', 1.03, 1.0506),
(10516, 'a', '1/4/2000', 1.04, 1.04),
(10516, 'b', '1/1/2000', 1.05, 1.05),
(10516, 'b', '1/2/2000', 1.06, 1.06),
(10516, 'b', '1/3/2000', 1.07, 1.1342),
(10516, 'b', '1/4/2000', 1.08, 1.08),
(10517, 'a', '1/1/2000', 1.09, 1.09),
(10517, 'a', '1/2/2000', 1.1, 1.1),
(10517, 'a', '1/3/2000', 1.11, 1.2210000000000003),
(10517, 'a', '1/4/2000', 1.12, 1.12),
], columns = ['PERMNO', 'byvar', 'Date', 'RET', 'cum_RET'])
expect_first = pd.DataFrame([
(10516, 'a', '1/1/2000', 1.01, 1.01),
(10516, 'a', '1/2/2000', 1.02, 1.02),
(10516, 'a', '1/3/2000', 1.03, 1.0506),
(10516, 'a', '1/4/2000', 1.04, 1.092624),
(10516, 'b', '1/1/2000', 1.05, 1.05),
(10516, 'b', '1/2/2000', 1.06, 1.06),
(10516, 'b', '1/3/2000', 1.07, 1.1342),
(10516, 'b', '1/4/2000', 1.08, 1.224936),
(10517, 'a', '1/1/2000', 1.09, 1.09),
(10517, 'a', '1/2/2000', 1.10, 1.10),
(10517, 'a', '1/3/2000', 1.11, 1.221),
(10517, 'a', '1/4/2000', 1.12, 1.36752),
], columns = ['PERMNO','byvar','Date', 'RET', 'cum_RET'])
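    # Spot-check of the expected values: with the 'between' [1, 3] window the cumulated
    # return on 1/3/2000 is 1.02 * 1.03 = 1.0506, and with the 'first' method the
    # 1/4/2000 value compounds to 1.02 * 1.03 * 1.04 = 1.092624.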
def test_method_between_1_3(self):
cum_df = dero.pandas.cumulate(self.df, 'RET', 'between', periodvar='Date',
byvars=['PERMNO','byvar'], time=[1,3])
assert_frame_equal(self.expect_between_1_3, cum_df, check_dtype=False)
def test_method_between_m2_0(self):
cum_df = dero.pandas.cumulate(self.df, 'RET', 'between', periodvar='Date',
byvars=['PERMNO','byvar'], time=[-2,0])
#Actually same result as [1,3]
assert_frame_equal(self.expect_between_1_3, cum_df, check_dtype=False)
def test_shifted_index(self):
df = self.df.copy()
df.index = df.index + 10
cum_df = dero.pandas.cumulate(df, 'RET', 'between', periodvar='Date',
byvars=['PERMNO','byvar'], time=[-2,0])
assert_frame_equal(self.expect_between_1_3, cum_df, check_dtype=False)
def test_method_first(self):
cum_df = dero.pandas.cumulate(self.df, 'RET', 'first', periodvar='Date',
byvars=['PERMNO','byvar'])
assert_frame_equal(self.expect_first, cum_df, check_dtype=False)
def test_grossify(self):
df = self.df.copy() #don't overwrite original
df['RET'] -= 1 #ungrossify
expect_first_grossify = self.expect_first.copy()
expect_first_grossify['cum_RET'] -= 1
expect_first_grossify['RET'] -= 1
cum_df = dero.pandas.cumulate(df, 'RET', 'first', periodvar='Date',
byvars=['PERMNO','byvar'], grossify=True)
assert_frame_equal(expect_first_grossify, cum_df, check_dtype=False)
class TestGroupbyMerge(DataFrameTest):
def test_subset_max(self):
byvars = ['PERMNO','byvar']
out = dero.pandas.groupby_merge(self.df, byvars, 'max', subset='RET')
expect_df = pd.DataFrame(
[(10516, 'a', '1/1/2000', 1.01, 1.04),
(10516, 'a', '1/2/2000', 1.02, 1.04),
(10516, 'a', '1/3/2000', 1.03, 1.04),
(10516, 'a', '1/4/2000', 1.04, 1.04),
(10516, 'b', '1/1/2000', 1.05, 1.08),
(10516, 'b', '1/2/2000', 1.06, 1.08),
(10516, 'b', '1/3/2000', 1.07, 1.08),
(10516, 'b', '1/4/2000', 1.08, 1.08),
(10517, 'a', '1/1/2000', 1.09, 1.12),
(10517, 'a', '1/2/2000', 1.10, 1.12),
(10517, 'a', '1/3/2000', 1.11, 1.12),
(10517, 'a', '1/4/2000', 1.12, 1.12)],
columns = ['PERMNO','byvar','Date', 'RET', 'RET_max'])
assert_frame_equal(expect_df, out)
def test_subset_std(self):
byvars = ['PERMNO','byvar']
out = dero.pandas.groupby_merge(self.df, byvars, 'std', subset='RET')
expect_df = pd.DataFrame(
[(10516, 'a', '1/1/2000', 1.01, 0.012909944487358068),
(10516, 'a', '1/2/2000', 1.02, 0.012909944487358068),
(10516, 'a', '1/3/2000', 1.03, 0.012909944487358068),
(10516, 'a', '1/4/2000', 1.04, 0.012909944487358068),
(10516, 'b', '1/1/2000', 1.05, 0.012909944487358068),
(10516, 'b', '1/2/2000', 1.06, 0.012909944487358068),
(10516, 'b', '1/3/2000', 1.07, 0.012909944487358068),
(10516, 'b', '1/4/2000', 1.08, 0.012909944487358068),
(10517, 'a', '1/1/2000', 1.09, 0.012909944487358068),
(10517, 'a', '1/2/2000', 1.10, 0.012909944487358068),
(10517, 'a', '1/3/2000', 1.11, 0.012909944487358068),
(10517, 'a', '1/4/2000', 1.12, 0.012909944487358068)],
columns = ['PERMNO','byvar','Date', 'RET', 'RET_std'])
assert_frame_equal(expect_df, out)
def test_nan_byvar_transform(self):
expect_df = self.df_nan_byvar.copy()
expect_df['val_transform'] = expect_df['val']
out = dero.pandas.groupby_merge(self.df_nan_byvar, 'byvar', 'transform', (lambda x: x))
assert_frame_equal(expect_df, out)
def test_nan_byvar_and_nan_val_transform_numeric(self):
non_standard_index = self.df_nan_byvar_and_val.copy()
non_standard_index.index = [5,6,7,8]
expect_df = self.df_nan_byvar_and_val.copy()
expect_df['val_transform'] = expect_df['val'] + 1
expect_df.index = [5,6,7,8]
out = dero.pandas.groupby_merge(non_standard_index, 'byvar', 'transform', (lambda x: x + 1))
assert_frame_equal(expect_df, out)
def test_nan_byvar_and_nan_val_and_nonstandard_index_transform_numeric(self):
expect_df = self.df_nan_byvar_and_val.copy()
expect_df['val_transform'] = expect_df['val'] + 1
def test_nan_byvar_sum(self):
expect_df = pd.DataFrame(data = [
('a', 1, 1.0),
(nan, 2, nan),
('b', 3, 7.0),
('b', 4, 7.0),
], columns = ['byvar', 'val', 'val_sum'])
out = dero.pandas.groupby_merge(self.df_nan_byvar, 'byvar', 'sum')
assert_frame_equal(expect_df, out)
class TestLongToWide:
expect_df_with_colindex = pd.DataFrame(data = [
(10516, 'a', 1.01, 1.02, 1.03, 1.04),
(10516, 'b', 1.05, 1.06, 1.07, 1.08),
(10517, 'a', 1.09, 1.1, 1.11, 1.12),
], columns = ['PERMNO', 'byvar',
'RET1/1/2000', 'RET1/2/2000',
'RET1/3/2000', 'RET1/4/2000'])
expect_df_no_colindex = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.01, 1.02, 1.03, 1.04),
(10516, 'a', '1/2/2000', 1.01, 1.02, 1.03, 1.04),
(10516, 'a', '1/3/2000', 1.01, 1.02, 1.03, 1.04),
(10516, 'a', '1/4/2000', 1.01, 1.02, 1.03, 1.04),
(10516, 'b', '1/1/2000', 1.05, 1.06, 1.07, 1.08),
(10516, 'b', '1/2/2000', 1.05, 1.06, 1.07, 1.08),
(10516, 'b', '1/3/2000', 1.05, 1.06, 1.07, 1.08),
(10516, 'b', '1/4/2000', 1.05, 1.06, 1.07, 1.08),
(10517, 'a', '1/1/2000', 1.09, 1.1, 1.11, 1.12),
(10517, 'a', '1/2/2000', 1.09, 1.1, 1.11, 1.12),
(10517, 'a', '1/3/2000', 1.09, 1.1, 1.11, 1.12),
(10517, 'a', '1/4/2000', 1.09, 1.1, 1.11, 1.12),
], columns = ['PERMNO', 'byvar', 'Date', 'RET0',
'RET1', 'RET2', 'RET3'])
input_data = DataFrameTest()
ltw_no_dup_colindex = dero.pandas.long_to_wide(input_data.df,
['PERMNO', 'byvar'], 'RET', colindex='Date')
ltw_dup_colindex = dero.pandas.long_to_wide(input_data.df_duplicate_row,
['PERMNO', 'byvar'], 'RET', colindex='Date')
ltw_no_dup_no_colindex = dero.pandas.long_to_wide(input_data.df,
['PERMNO', 'byvar'], 'RET')
ltw_dup_no_colindex = dero.pandas.long_to_wide(input_data.df_duplicate_row,
['PERMNO', 'byvar'], 'RET')
df_list = [ltw_no_dup_colindex, ltw_dup_colindex,
ltw_no_dup_no_colindex, ltw_dup_no_colindex]
def test_no_duplicates_with_colindex(self):
assert_frame_equal(self.expect_df_with_colindex, self.ltw_no_dup_colindex)
def test_duplicates_with_colindex(self):
assert_frame_equal(self.expect_df_with_colindex, self.ltw_dup_colindex)
def test_no_duplicates_no_colindex(self):
assert_frame_equal(self.expect_df_no_colindex, self.ltw_no_dup_no_colindex)
def test_duplicates_no_colindex(self):
assert_frame_equal(self.expect_df_no_colindex, self.ltw_dup_no_colindex)
def test_no_extra_vars(self):
for df in self.df_list:
assert ('__idx__','__key__') not in df.columns
class TestPortfolioAverages:
input_data = DataFrameTest()
expect_avgs_no_wt = pd.DataFrame(data = [
(1, 'a', 1.0250000000000001),
(1, 'b', 1.0550000000000002),
(2, 'a', 1.1050000000000002),
(2, 'b', 1.0750000000000002),
], columns = ['portfolio', 'byvar', 'RET'])
expect_avgs_wt = pd.DataFrame(data = [
(1, 'a', 1.0250000000000001, 1.025),
(1, 'b', 1.0550000000000002, 1.0550000000000002),
(2, 'a', 1.1050000000000002, 1.12),
(2, 'b', 1.0750000000000002, 1.0750000000000002),
], columns = ['portfolio', 'byvar', 'RET', 'RET_wavg'])
expect_ports = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.01, 0, 1),
(10516, 'a', '1/2/2000', 1.02, 1, 1),
(10516, 'a', '1/3/2000', 1.03, 1, 1),
(10516, 'a', '1/4/2000', 1.04, 0, 1),
(10516, 'b', '1/1/2000', 1.05, 1, 1),
(10516, 'b', '1/2/2000', 1.06, 1, 1),
(10516, 'b', '1/3/2000', 1.07, 1, 2),
(10516, 'b', '1/4/2000', 1.08, 1, 2),
(10517, 'a', '1/1/2000', 1.09, 0, 2),
(10517, 'a', '1/2/2000', 1.1, 0, 2),
(10517, 'a', '1/3/2000', 1.11, 0, 2),
(10517, 'a', '1/4/2000', 1.12, 1, 2),
], columns = ['PERMNO', 'byvar', 'Date', 'RET', 'weight', 'portfolio'])
avgs, ports = dero.pandas.portfolio_averages(input_data.df_weight, 'RET', 'RET', ngroups=2,
byvars='byvar')
w_avgs, w_ports = dero.pandas.portfolio_averages(input_data.df_weight, 'RET', 'RET', ngroups=2,
byvars='byvar', wtvar='weight')
def test_simple_averages(self):
assert_frame_equal(self.expect_avgs_no_wt, self.avgs, check_dtype=False)
def test_weighted_averages(self):
assert_frame_equal(self.expect_avgs_wt, self.w_avgs, check_dtype=False)
def test_portfolio_construction(self):
print(self.ports)
assert_frame_equal(self.expect_ports, self.ports, check_dtype=False)
assert_frame_equal(self.expect_ports, self.w_ports, check_dtype=False)
class TestWinsorize(DataFrameTest):
def test_winsor_40_subset_byvars(self):
expect_df = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.022624),
(10516, 'a', '1/2/2000', 1.022624),
(10516, 'a', '1/3/2000', 1.02672),
(10516, 'a', '1/4/2000', 1.02672),
(10516, 'b', '1/1/2000', 1.062624),
(10516, 'b', '1/2/2000', 1.062624),
(10516, 'b', '1/3/2000', 1.06672),
(10516, 'b', '1/4/2000', 1.06672),
(10517, 'a', '1/1/2000', 1.102624),
(10517, 'a', '1/2/2000', 1.102624),
(10517, 'a', '1/3/2000', 1.10672),
(10517, 'a', '1/4/2000', 1.10672),
], columns = ['PERMNO', 'byvar', 'Date', 'RET'])
wins = dero.pandas.winsorize(self.df, .4, subset='RET', byvars=['PERMNO','byvar'])
assert_frame_equal(expect_df, wins, check_less_precise=True)
class TestRegBy(DataFrameTest):
def create_indf(self):
indf = self.df_weight.copy()
indf['key'] = indf['PERMNO'].astype(str) + '_' + indf['byvar']
return indf
def test_regby_nocons(self):
indf = self.create_indf()
expect_df = pd.DataFrame(data = [
(0.48774684748988806, '10516_a'),
(0.9388636664168903, '10516_b'),
(0.22929206076239614, '10517_a'),
], columns = ['coef_RET', 'key'])
rb = dero.pandas.reg_by(indf, 'weight', 'RET', 'key', cons=False)
print('Reg by: ', rb)
assert_frame_equal(expect_df, rb)
def test_regby_cons(self):
indf = self.create_indf()
expect_df = pd.DataFrame(data = [
(0.49999999999999645, 5.329070518200751e-15, '10516_a'),
(0.9999999999999893, 1.0658141036401503e-14, '10516_b'),
(-32.89999999999997, 29.999999999999982, '10517_a'),
], columns = ['const', 'coef_RET', 'key'])
rb = dero.pandas.reg_by(indf, 'weight', 'RET', 'key')
print('Reg by: ', rb)
assert_frame_equal(expect_df, rb)
def test_regby_cons_low_obs(self):
indf = self.create_indf().loc[:8,:] #makes it so that one byvar only has one obs
expect_df = pd.DataFrame(data = [
(0.49999999999999645, 5.329070518200751e-15, '10516_a'),
(0.9999999999999893, 1.0658141036401503e-14, '10516_b'),
(nan, nan, '10517_a'),
], columns = ['const', 'coef_RET', 'key'])
rb = dero.pandas.reg_by(indf, 'weight', 'RET', 'key')
print('Reg by: ', rb)
assert_frame_equal(expect_df, rb)
class TestExpandMonths(DataFrameTest):
def test_expand_months_tradedays(self):
expect_df = pd.DataFrame(data = [
(Timestamp('2000-01-03 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-04 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-05 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-06 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-07 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-10 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-11 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-12 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-13 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-14 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-18 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-19 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-20 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-21 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-24 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-25 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-26 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-27 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-28 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-31 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
], columns = ['Daily Date', 'byvar', 'Date', 'TICKER'])
em = dero.pandas.expand_months(self.single_ticker_df)
assert_frame_equal(expect_df.sort_index(axis=1), em.sort_index(axis=1))
def test_expand_months_calendardays(self):
expect_df = pd.DataFrame(data = [
(Timestamp('2000-01-01 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-02 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-03 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-04 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-05 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-06 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-07 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-08 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-09 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-10 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-11 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-12 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-13 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-14 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-15 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-16 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-17 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-18 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-19 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-20 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-21 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-22 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-23 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-24 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-25 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-26 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-27 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-28 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-29 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-30 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-31 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
], columns = ['Daily Date', 'byvar', 'Date', 'TICKER'])
em = dero.pandas.expand_months(self.single_ticker_df, trade_days=False)
assert_frame_equal(expect_df.sort_index(axis=1), em.sort_index(axis=1))
class TestPortfolio(DataFrameTest):
def test_portfolio_byvars(self):
expect_df = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.01, 1),
(10516, 'a', '1/2/2000', 1.02, 1),
(10516, 'a', '1/3/2000', 1.03, 2),
(10516, 'a', '1/4/2000', 1.04, 2),
(10516, 'b', '1/1/2000', 1.05, 1),
(10516, 'b', '1/2/2000', 1.06, 1),
(10516, 'b', '1/3/2000', 1.07, 2),
(10516, 'b', '1/4/2000', 1.08, 2),
(10517, 'a', '1/1/2000', 1.09, 1),
(10517, 'a', '1/2/2000', 1.1, 1),
(10517, 'a', '1/3/2000', 1.11, 2),
(10517, 'a', '1/4/2000', 1.12, 2),
], columns = ['PERMNO', 'byvar', 'Date', 'RET', 'portfolio'])
p = dero.pandas.portfolio(self.df, 'RET', ngroups=2, byvars=['PERMNO','byvar'])
assert_frame_equal(expect_df, p, check_dtype=False)
def test_portfolio_with_nan_and_byvars(self):
expect_df = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', nan, 0),
(10516, 'a', '1/2/2000', 1.02, 1),
(10516, 'a', '1/3/2000', 1.03, 1), #changed from 2 to 1 when updated nan handling
(10516, 'a', '1/4/2000', 1.04, 2),
(10516, 'b', '1/1/2000', 1.05, 1),
(10516, 'b', '1/2/2000', 1.06, 1),
(10516, 'b', '1/3/2000', 1.07, 2),
(10516, 'b', '1/4/2000', 1.08, 2),
(10517, 'a', '1/1/2000', 1.09, 1),
(10517, 'a', '1/2/2000', 1.1, 1),
(10517, 'a', '1/3/2000', 1.11, 2),
(10517, 'a', '1/4/2000', 1.12, 2),
], columns = ['PERMNO', 'byvar', 'Date', 'RET', 'portfolio'])
indf = self.df.copy()
indf.loc[0, 'RET'] = nan
p = dero.pandas.portfolio(indf, 'RET', ngroups=2, byvars=['PERMNO','byvar'])
assert_frame_equal(expect_df, p, check_dtype=False)
class TestConvertSASDateToPandasDate:
df_sasdate = pd.DataFrame(data = [
('011508', 16114.0),
('011508', 16482.0),
('011508', 17178.0),
('011508', 17197.0),
('011508', 17212.0),
], columns = ['gvkey', 'datadate'])
df_sasdate_nan = pd.DataFrame(data = [
('011508', 16114.0),
('011508', 16482.0),
('011508', 17178.0),
('011508', 17197.0),
('011508', nan),
('011508', 17212.0),
], columns = ['gvkey', 'datadate'])
def test_convert(self):
expect_df = pd.DataFrame(data = [
(numpy.datetime64('2004-02-13T00:00:00.000000000'),),
(numpy.datetime64('2005-02-15T00:00:00.000000000'),),
(numpy.datetime64('2007-01-12T00:00:00.000000000'),),
(numpy.datetime64('2007-01-31T00:00:00.000000000'),),
(numpy.datetime64('2007-02-15T00:00:00.000000000'),),
], columns = [0])
converted = pd.DataFrame(dero.pandas.convert_sas_date_to_pandas_date(self.df_sasdate['datadate']))
assert_frame_equal(expect_df, converted)
def test_convert_nan(self):
expect_df = pd.DataFrame(data = [
(numpy.datetime64('2004-02-13T00:00:00.000000000'),),
(numpy.datetime64('2005-02-15T00:00:00.000000000'),),
(numpy.datetime64('2007-01-12T00:00:00.000000000'),),
(numpy.datetime64('2007-01-31T00:00:00.000000000'),),
(numpy.datetime64('NaT'),),
(numpy.datetime64('2007-02-15T00:00:00.000000000'),),
], columns = [0])
converted = pd.DataFrame(dero.pandas.convert_sas_date_to_pandas_date(self.df_sasdate_nan['datadate']))
assert_frame_equal(expect_df, converted)
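# For reference: SAS stores dates as day counts from 1960-01-01, so the expected values
# above amount to (sketch only, not necessarily the dero implementation):
#   pd.to_datetime('1960-01-01') + pd.to_timedelta(df_sasdate['datadate'], unit='D')
# e.g. 16114 days after 1960-01-01 is 2004-02-13.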
class TestMapWindows(DataFrameTest):
times = [
[-4, -2, 0],
[-3, 1, 2],
[4, 5, 6],
[0, 1, 2],
[-1, 0, 1]
]
df_period_str = pd.DataFrame([
(10516, '1/1/2000', 1.01),
(10516, '1/2/2000', 1.02),
(10516, '1/3/2000', 1.03),
(10516, '1/4/2000', 1.04),
(10516, '1/5/2000', 1.05),
(10516, '1/6/2000', 1.06),
(10516, '1/7/2000', 1.07),
(10516, '1/8/2000', 1.08),
(10517, '1/1/2000', 1.09),
(10517, '1/2/2000', 1.10),
(10517, '1/3/2000', 1.11),
(10517, '1/4/2000', 1.12),
(10517, '1/5/2000', 1.05),
(10517, '1/6/2000', 1.06),
(10517, '1/7/2000', 1.07),
(10517, '1/8/2000', 1.08),
], columns = ['PERMNO','Date', 'RET'])
df_period = df_period_str.copy()
df_period['Date'] = pd.to_datetime(df_period['Date'])
expect_dfs = [
pd.DataFrame(data = [
(10516, Timestamp('2000-01-01 00:00:00'), 1.01, 0),
(10516, Timestamp('2000-01-02 00:00:00'), 1.02, 1),
(10516, Timestamp('2000-01-03 00:00:00'), 1.03, 1),
(10516, Timestamp('2000-01-04 00:00:00'), 1.04, 2),
(10516, Timestamp('2000-01-05 00:00:00'), 1.05, 2),
(10516, Timestamp('2000-01-06 00:00:00'), 1.06, 3),
(10516, Timestamp('2000-01-07 00:00:00'), 1.07, 3),
(10516, Timestamp('2000-01-08 00:00:00'), 1.08, 3),
(10517, Timestamp('2000-01-01 00:00:00'), 1.09, 0),
(10517, Timestamp('2000-01-02 00:00:00'), 1.1, 1),
(10517, Timestamp('2000-01-03 00:00:00'), 1.11, 1),
(10517, Timestamp('2000-01-04 00:00:00'), 1.12, 2),
(10517, Timestamp('2000-01-05 00:00:00'), 1.05, 2),
(10517, Timestamp('2000-01-06 00:00:00'), 1.06, 3),
            (10517, Timestamp('2000-01-07 00:00:00')
# import libraries
import sys
import os
import time
import csv
import re  # regex
import pandas as pd
import numpy as np
#from langdetect import detect
from collections import Counter
from nltk.corpus import stopwords as sw
from nltk.tokenize import word_tokenize
from nltk.stem.snowball import ItalianStemmer
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.naive_bayes import GaussianNB, MultinomialNB, ComplementNB
from sklearn.metrics import f1_score, confusion_matrix
import matplotlib.pyplot as plt
# %matplotlib inline
import seaborn as sns
def loadData(file, dev=False):
    # check if files exist
    if not os.path.isfile(file):
        print(f"File not found in specified path ({file})")
        sys.exit(1)
    df = pd.read_csv(file, encoding='utf-8')
#!/usr/bin/env python
import os
import pickle as pickle
from djeval import *
import numpy as np
from pandas import DataFrame, Series, read_pickle, concat, cut, qcut
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.externals import joblib
MG_CLIP = -300
def quantile_scorer_two(estimator, X, y):
    """Return the empirical fraction of true values falling below the estimator's predictions."""
    pred_y = estimator.predict(X)
    print(DataFrame([X['elo'], y, pred_y]))
    mask = y < pred_y
    return float(mask.sum()) / y.shape[0]
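# quantile_scorer_two follows sklearn's scorer(estimator, X, y) convention, so it can be
# passed wherever a scoring callable is accepted; for a q-quantile regressor the returned
# coverage should sit near q. Illustrative check using objects defined later in this
# script (hypothetical, not executed here):
# coverage = quantile_scorer_two(model, fitted_df[features], fitted_df['clipped_movergain'])
# print('empirical coverage: %.3f' % coverage)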
msg('loading blundermodels')
blundermodel_dir = sys.argv[1]
thing = joblib.load(blundermodel_dir + 'groups.p')
elo_bins = thing[0]
mg_quants = thing[1]
print('elo_bins is %s' % str(elo_bins))
print('mg_quants is %s' % str(mg_quants))
blundermodels = {}
num_models = (len(elo_bins) - 1) * (len(mg_quants) + 1)
msg('Loading the %i models' % num_models)
for modelnum in range(0,num_models):
thing = joblib.load('%s%i.p' % (blundermodel_dir, modelnum))
elo_name = thing[0]
mg_quant = thing[1]
model = thing[2]
blundermodels[elo_name, mg_quant] = model
msg('reading movedata')
moves_df = read_pickle('/data/movedata.p')
moves_df['clipped_movergain'] = moves_df['movergain'].clip(MG_CLIP,0)
train_df = moves_df[moves_df['elo'].notnull()]
fitted_df = train_df[train_df['gamenum'] % 2 == 0]
test_df = train_df[train_df['gamenum'] % 2 == 1]
features = ['side', 'halfply', 'moverscore', 'bestmove_is_capture', 'bestmove_is_check', 'depth', 'seldepth', 'num_bestmoves', 'num_bestmove_changes', 'bestmove_depths_agreeing', 'deepest_change']
for key, model in blundermodels.items():
elo_name = key[0]
mg_quant = key[1]
elo_bounds = [float(x) for x in elo_name[1:-1].split(', ')]
moves_to_test = fitted_df[(fitted_df['elo'] >= elo_bounds[0]) & (fitted_df['elo'] <= elo_bounds[1])]
diagnostic_cols_to_show = ['gamenum','elo','perfect_pred','movergain']
diagnostic_cols_to_show.extend(features)
if mg_quant == 1.0:
X = moves_to_test[features]
y = (moves_to_test['clipped_movergain'] == 0)
pred_y = model.predict_proba(X)
pred_y = [x[1] for x in pred_y]
combo = concat([Series(y.values), Series(pred_y)], axis=1)
combo_groups = cut(combo[1], 10)
print(("%s perfect-move model prediction distribution and success:\n%s" % (elo_name, combo.groupby(combo_groups)[0].agg({'mean': np.mean, 'count': len}))))
moves_to_test['perfect_pred'] = pred_y
print("MOVES MOST LIKELY TO MAKE THE BEST MOVE:")
print(moves_to_test.sort('perfect_pred', ascending=False)[diagnostic_cols_to_show].head())
print("MOVES LEAST LIKELY TO MAKE THE BEST MOVE:")
print(moves_to_test.sort('perfect_pred', ascending=True)[diagnostic_cols_to_show].head())
else:
imperfect_moves = moves_to_test[moves_to_test['clipped_movergain'] < 0]
X = imperfect_moves[features]
y = imperfect_moves['clipped_movergain']
pred_y = model.predict(X)
mask = y < pred_y
score = float(mask.sum()) / y.shape[0]
print(('imperfect-move error-size quantile model for %s: true quantile is %f' % (key, score)))
        combo = concat([Series(y.values), Series(pred_y)], axis=1)
""" ecospold2matrix - Class for recasting ecospold2 dataset in matrix form.
The module provides function to parse ecospold2 data, notably ecoinvent 3, as
Leontief A-matrix and extensions, or alternatively as supply and use tables for
the unallocated version of ecoinvent.
:PythonVersion: 3
:Dependencies: pandas 0.14.1 or more recent, scipy, numpy, lxml and xml
License: BSD
Authors:
<NAME>
<NAME>
<NAME>
<NAME>
Credits:
This module re-uses/adapts code from brightway2data, more specifically the
Ecospold2DataExtractor class in import_ecospold2.py, changeset:
    271:7e67a75ed791; Wed Sep 10; published under BSD-license:
Copyright (c) 2014, <NAME> and ETH Zürich
Neither the name of ETH Zürich nor the names of its contributors may be
used to endorse or promote products derived from this software without
specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE
COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
import pdb
import os
import glob
import io
import pkgutil
import subprocess
from lxml import objectify
import xml.etree.ElementTree as ET
from lxml import etree
import pandas as pd
_df = pd.DataFrame
import numpy as np
import scipy.sparse
import scipy.io
import logging
import pickle
import gzip
import csv
import shelve
import hashlib
import sqlite3
try:
import IPython
except:
pass
import re
import xlrd
import xlwt
import copy
# pylint: disable-msg=C0103
class Ecospold2Matrix(object):
"""
Defines a parser object that holds all project parameters and processes the
ecospold-formatted data into matrices of choice.
The two main functions of this class are ecospold_to_Leontief() and
ecospold_to_sut()
"""
# Some hardcoded stuff
__PRE = '{http://www.EcoInvent.org/EcoSpold02}'
__ELEXCHANGE = 'ElementaryExchanges.xml'
__INTERMEXCHANGE = 'IntermediateExchanges.xml'
__ACTIVITYINDEX = 'ActivityIndex.xml'
__DB_CHARACTERISATION = 'characterisation.db'
rtolmin = 1e-16 # 16 significant digits being roughly the limit of float64
__TechnologyLevels = pd.Series(
['Undefined', 'New', 'Modern', 'Current', 'Old', 'Outdated'],
index=[0, 1, 2, 3, 4, 5])
def __init__(self, sys_dir, project_name, out_dir='.', lci_dir=None,
positive_waste=False, prefer_pickles=False, nan2null=False,
save_interm=True, PRO_order=['ISIC', 'activityName'],
STR_order=['comp', 'name', 'subcomp'],
verbose=True, version_name='ecoinvent31',
unlinked = True, remove_markets=True):
""" Defining an ecospold2matrix object, with key parameters that
determine how the data will be processes.
Args:
-----
* sys_dir: directory containing the system description,i.e., ecospold
dataset and master XML files
* project_name: Name used to log progress and save results
* out_dir: Directory where to save result matrices and logs
* lci_dir: Directory where official cummulative LCI ecospold files are
* positive_waste: Whether or not to change sign convention and make
waste flows positive
[default false]
* prefer_pickles: If sys_dir contains pre-processed data in form of
pickle-files, whether or not to use those
[Default: False, don't use]
* nan2null: Whether or not to replace Not-a-Number by 0.0
[Default: False, don't replace anything]
* save_interm: Whether or not to save intermediate results as pickle
files for potential re-use
[Default: True, do it]
* PRO_order: List of meta-data used for sorting processes in the
different matrices.
[Default: first sort by order of ISIC code, then, within
each code, by order of activity name]
        * STR_order: List of meta-data used for sorting stressors (elementary
                     flows) in the different matrices.
[Default: first sort by order of compartment,
subcompartment and then by name]
* unlinked: Whether or not the datasets are linked/allocated.
[Default: True, the data are unlinked]
        Main functions and workflow:
---------------------------
self.ecospold_to_Leontief(): Turn ecospold files into Leontief matrix
representation
* Parse ecospold files, get products, activities, flows, emissions
* If need be, correct inconsistencies in system description
* After corrections, create "final" labels for matrices
* Generate symmetric, normalized system description (A-matrix,
extension F-matrix)
* Save to file (many different formats)
* Optionally, read cummulative lifecycle inventories (slow) and
compare to calculated LCI for sanity testing
        self.ecospold_to_sut(): Turn unallocated ecospold into Supply and Use
Tables
* Parse ecospold files, get products, activities, flows, emissions
* Organize in supply and use
* optionally, aggregate sources to generate a fully untraceable SUT
* Save to file
"""
# INTERMEDIATE DATA/RESULTS, TO BE GENERATED BY OBJECT METHODS
self.products = None # products, with IDs and descriptions
self.activities = None # activities, w IDs and description
self.inflows = None # intermediate-exchange input flows
self.outflows = None # intermediate-exchange output flows
self.prices = None
self.elementary_flows = None # elementary flows
self.q = None # total supply of each product
self.PRO_old=None
self.STR_old = None
self.IMP_old=None
# FINAL VARIABLES: SYMMETRIC SYSTEM, NORMALIZED AND UNNORMALIZED
self.PRO = None # Process labels, rows/cols of A-matrix
self.STR = None # Factors labels, rows extensions
self.IMP = pd.DataFrame([]) # impact categories
self.A = None # Normalized Leontief coefficient matrix
self.F = None # Normalized factors of production,i.e.,
# elementary exchange coefficients
self.Z = None # Intermediate unnormalized process flows
self.G_pro = None # Unnormalized Process factor requirements
self.C = pd.DataFrame([]) # characterisation matrix
# Final variables, unallocated and unnormalized inventory
self.U = None # Table of use of products by activities
self.V = None # Table of supply of product by activities
# (ammounts for which use is recorded)
self.G_act = None # Table of factor use by activities
self.V_prodVol = None # Table of supply production volumes
# (potentially to rescale U, V and G)
# QUALITY CHECKS VARIABLES, TO BE GENERATED BY OBJECT METHODS.
self.E = None # cummulative LCI matrix (str x pro)
self.unsourced_flows = None # product flows without clear source
self.missing_activities = None # cases of no incomplete dataset, i.e.,
# no producer for a product
# PROJECT NAME AND DIRECTORIES, FROM ARGUMENTS
self.sys_dir = os.path.abspath(sys_dir)
self.project_name = project_name
self.out_dir = os.path.abspath(out_dir)
if lci_dir:
self.lci_dir = os.path.abspath(lci_dir)
else:
self.lci_dir = lci_dir
self.version_name = version_name
self.char_method = None # characterisation method set by
# read_characterisation function
self.data_version = None
# PROJECT-WIDE OPTIONS
self.positive_waste = positive_waste
self.prefer_pickles = prefer_pickles
self.nan2null = nan2null
self.save_interm = save_interm
self.PRO_order = PRO_order
self.STR_order = STR_order
# DATASETS UNLINKED/UNALLOCATED
        self.unlinked = unlinked  # whether the spold files are unlinked/unallocated; default True, i.e. the data are NOT linked
        self.remove_markets = remove_markets  # if the data are unlinked, remove the markets; see self.remove_Markets()
# CREATE DIRECTORIES IF NOT IN EXISTENCE
if out_dir and not os.path.exists(self.out_dir):
os.makedirs(self.out_dir)
self.log_dir = os.path.join(self.out_dir, self.project_name + '_log')
# Fresh new log
os.system('rm -Rf ' + self.log_dir)
os.makedirs(self.log_dir)
# DEFINE LOG TOOL
self.log = logging.getLogger(self.project_name)
self.log.setLevel(logging.INFO)
self.log.handlers = [] # reset handlers
if verbose:
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
fh = logging.FileHandler(os.path.join(self.log_dir,
project_name + '.log'))
fh.setLevel(logging.INFO)
aformat = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
formatter = logging.Formatter(aformat)
fh.setFormatter(formatter)
self.log.addHandler(fh)
if verbose:
ch.setFormatter(formatter)
self.log.addHandler(ch)
# RECORD OBJECT/PROJECT IDENTITY TO LOG
self.log.info('Ecospold2Matrix Processing')
try:
gitcommand = ["git", "log", "--pretty=format:%H", "-n1"]
githash = subprocess.check_output(gitcommand).decode("utf-8")
self.log.info("Current git commit: {}".format(githash))
except:
pass
self.log.info('Project name: ' + self.project_name)
# RECORD PROJECT PARAMETERS TO LOG
self.log.info('Unit process and Master data directory: ' + sys_dir)
self.log.info('Data saved in: ' + self.out_dir)
if self.lci_dir:
self.log.info('Official rolled-up life cycle inventories in: ' +
self.lci_dir)
if self.positive_waste:
self.log.info('Sign conventions changed to make waste flows '
'positive')
if self.prefer_pickles:
self.log.info('When possible, loads pickled data instead of'
' parsing ecospold files')
if self.nan2null:
self.log.info('Replace Not-a-Number instances with 0.0 in all'
' matrices')
if self.save_interm:
self.log.info('Pickle intermediate results to files')
self.log.info('Order processes based on: ' +
', '.join([i for i in self.PRO_order]))
self.log.info('Order elementary exchanges based on: ' +
', '.join([i for i in self.STR_order]))
database_name = self.project_name + '_' + self.__DB_CHARACTERISATION
os.system('rm ' + database_name)
try:
self.conn = sqlite3.connect(
self.project_name + '_' + self.__DB_CHARACTERISATION)
self.initialize_database()
except:
self.log.warning("Could not establish connection to database")
pass
self.conn.commit()
# =========================================================================
# MAIN FUNCTIONS
def ecospold_to_Leontief(self, fileformats=None, with_absolute_flows=False,
lci_check=False, rtol=5e-2, atol=1e-5, imax=3,
characterisation_file=None,
ardaidmatching_file=None):
""" Recasts an full ecospold dataset into normalized symmetric matrices
Args:
-----
* fileformats : List of file formats in which to save data
[Default: None, save to all possible file formats]
Options: 'Pandas' --> pandas dataframes
'csv' --> text with separator = '|'
'SparsePandas' --> sparse pandas dataframes
'SparseMatrix' --> scipy AND matlab sparse
'SparseMatrixForArda' --> with special
background
variable names
        * with_absolute_flows: If true, produce not only coefficient matrices (A
and F) but also scale them up to production
volumes to get absolute flows in separate
matrices. [default: false]
* lci_check : If true, and if lci_dir is not None, parse cummulative
lifecycle inventory data as self.E matrix (str x pro),
and use it for sanity check against calculated
cummulative LCI
* rtol : Initial (max) relative tolerance for comparing E with
calculated E
* atol : Initial (max) absolute tolerance for comparing E with
calculated E
* characterisation_file: name of file containing characterisation
factors
* ardaidmatching_file: name of file matching Arda Ids, Ecoinvent2 DSIDs
and ecoinvent3 UUIDs. Only useful for the Arda
project.
Generates:
----------
* Intermediate data: products, activities, flows, labels
* A matrix: Normalized, intermediate exchange Leontief coefficients
(pro x pro)
* F matrix: Normalized extensions, factor requirements (elementary
exchanges) for each process (str x pro)
* E matrix: [optionally] cummulative normalized lci data (str x pro)
(for quality check)
Returns:
-------
* None, save all matrices in the object, and to file
"""
# Read in system description
self.extract_products()
self.extract_activities()
self.get_flows()
self.get_labels()
# Clean up if necessary
self.__find_unsourced_flows()
if self.unsourced_flows is not None:
self.__fix_flow_sources()
self.__fix_missing_activities()
# Once all is well, add extra info to PRO and STR, and order nicely
self.complement_labels()
# Finally, assemble normalized, symmetric matrices
self.build_AF()
if with_absolute_flows:
self.scale_up_AF()
if characterisation_file is not None:
print("starting characterisation")
if 'LCIA_implementation' in characterisation_file:
self.log.info("Characterisation file seems to be ecoinvent"
" LCIA implementation. Will apply simple name"
" matching")
self.simple_characterisation_matching(characterisation_file)
else:
self.prepare_matching_load_parameters()
self.process_inventory_elementary_flows()
self.read_characterisation(characterisation_file)
self.populate_complementary_tables()
self.characterize_flows()
self.generate_characterized_extensions()
if ardaidmatching_file:
self.make_compatible_with_arda(ardaidmatching_file)
# Save system to file
self.save_system(fileformats)
# Read/load lci cummulative emissions and perform quality check
if lci_check:
self.get_cummulative_lci()
self.cummulative_lci_check(rtol, atol, imax)
self.log.info('Done running ecospold2matrix.ecospold_to_Leontief')
def ecospold_to_sut(self, fileformats=None, make_untraceable=False):
""" Recasts an unallocated ecospold dataset into supply and use tables
Args:
-----
* fileformats : List of file formats in which to save data
[Default: None, save to all possible file formats]
Options: 'Pandas' --> pandas dataframes
'SparsePandas' --> sparse pandas dataframes,
'SparseMatrix' --> scipy AND matlab sparse
'csv' --> text files
* make_untraceable: Whether or not to aggregate away the source
activity dimension, yielding a use table in which
products are no longer linked to their providers
[default: False; don't do it]
Generates:
----------
* Intermediate data: Products, activities, flows, labels
* V table Matrix of supply of product by activities
* U table Matrix of use of products by activities
(recorded for a given supply amount, from V)
* G_act Matrix of factor use by activities
(recorded for a given supply amount, from V)
* V_prodVol Matrix of estimated real production volumes,
arranged as suply table (potentially useful
to rescale U, V and G)
Returns:
-------
* None, save all matrices in the object, and to file
"""
# Extract data on producs and activities
self.extract_products()
self.extract_activities()
# Extract or load data on flows and labels
self.get_flows()
self.get_labels()
self.complement_labels()
# Arrange as supply and use
if self.remove_markets is True:
self.remove_Markets()
self.build_sut(make_untraceable)
# Save to file
self.save_system(fileformats)
self.log.info("Done running ecospold2matrix.ecospold_to_sut")
# =========================================================================
# INTERMEDIATE WRAPPER METHODS: parse or load data + pickle results or not
def get_flows(self):
""" Wrapper: load from pickle or call extract_flows() to read ecospold
files.
Behaviour determined by:
------------------------
prefer_pickles: Whether or not to load flow lists from previous run
instead of (re)reading XML Ecospold files
save_interm: Whether or not to pickle flows to file for use in
another project run.
Generates:
----------
self.inflows
self.outflows
self.elementary_flows
self.prices
Returns:
--------
None, only defines within object
"""
filename = os.path.join(self.sys_dir, 'flows.pickle')
# EITHER LOAD FROM PREVIOUS ROUND...
if self.prefer_pickles and os.path.exists(filename):
# Read all flows
with open(filename, 'rb') as f:
[self.inflows,
self.elementary_flows,
self.outflows,
self.prices] = pickle.load(f)
# Log event
sha1 = self.__hash_file(f)
msg = "{} loaded from {} with SHA-1 of {}"
self.log.info(msg.format('Flows', filename, sha1))
# ...OR EXTRACT FROM ECOSPOLD DATA..
else:
self.extract_flows()
# optionally, pickle for further use
if self.save_interm:
with open(filename, 'wb') as f:
pickle.dump([self.inflows,
self.elementary_flows,
self.outflows,
self.prices], f)
# Log event
sha1 = self.__hash_file(filename)
msg = "{} saved in {} with SHA-1 of {}"
self.log.info(msg.format('Flows', filename, sha1))
def get_labels(self):
"""
Wrapper: load from pickle, or call methods to build labels from scratch
Behaviour determined by:
------------------------
* prefer_pickles: Whether or not to load flow lists from previous run
instead of (re)reading XML Ecospold files
* save_interm: Whether or not to pickle flows to file for use in
another project run.
Generates:
----------
* PRO: metadata on each process, i.e. production of each product
by each activity.
* STR: metadata on each stressor (or elementary exchange, factor of
production)
Returns:
--------
* None, only defines within object
NOTE:
-----
* At this stage, labels are at the strict minimum (ID, name) to
facilitate the addition of new processes or stressors, if need be, to
"patch" inconsistencies in the dataset. Once all is sorted out, more
data from product, activities, and elementary_flow descriptions are
added to the labels in self.complement_labels()
"""
filename = os.path.join(self.sys_dir, 'rawlabels.pickle')
# EITHER LOAD FROM PREVIOUS ROUND...
if self.prefer_pickles and os.path.exists(filename):
# Load from pickled file
with open(filename, 'rb') as f:
self.PRO, self.STR = pickle.load(f)
# Log event
sha1 = self.__hash_file(f)
msg = "{} loaded from {} with SHA-1 of {}"
self.log.info(msg.format('Labels', filename, sha1))
# OR EXTRACT FROM ECOSPOLD DATA...
else:
self.build_PRO()
self.build_STR()
# and optionally pickle for further use
if self.save_interm:
with open(filename, 'wb') as f:
pickle.dump([self.PRO, self.STR], f)
# Log event
sha1 = self.__hash_file(filename)
msg = "{} saved in {} with SHA-1 of {}"
self.log.info(msg.format('Labels', filename, sha1))
def get_cummulative_lci(self):
""" Wrapper: load from pickle or call build_E() to read ecospold files.
Behaviour determined by:
------------------------
* prefer_pickles: Whether or not to load flow lists from previous run
instead of (re)reading XML Ecospold files
* save_interm: Whether or not to pickle flows to file for use in
another project run.
* lci_dir: Directory where cummulative LCI ecospold are
Generates:
----------
* E: cummulative LCI emissions matrix
Returns:
--------
* None, only defines within object
"""
filename = os.path.join(self.lci_dir, 'lci.pickle')
# EITHER LOAD FROM PREVIOUS ROUND...
if self.prefer_pickles and os.path.exists(filename):
with open(filename, 'rb') as f:
self.E = pickle.load(f)
# log event
sha1 = self.__hash_file(f)
msg = "{} loaded from {} with SHA-1 of {}"
self.log.info(msg.format('Cummulative LCI', filename, sha1))
# OR BUILD FROM ECOSPOLD DATA...
else:
self.build_E()
# optionally, pickle for further use
if self.save_interm:
with open(filename, 'wb') as f:
pickle.dump(self.E, f)
# log event
sha1 = self.__hash_file(filename)
msg = "{} saved in {} with SHA-1 of {}"
self.log.info(msg.format('Cummulative LCI', filename, sha1))
# =========================================================================
# PARSING METHODS: the hard work with xml files
def extract_products(self):
""" Parses INTERMEDIATEEXCHANGE file to extract core data on products:
Id's, name, unitID, unitName.
Args: None
----
Returns: None
-------
Generates: self.products
----------
Credit:
------
This function incorporates/adapts code from Brightway2data, i.e., the
method extract_technosphere_metadata from class Ecospold2DataExtractor
"""
# The file to parse
fp = os.path.join(self.sys_dir, 'MasterData', self.__INTERMEXCHANGE)
assert os.path.exists(fp), "Can't find " + self.__INTERMEXCHANGE
def extract_metadata(o):
""" Subfunction to get the data from lxml root object """
# Get list of id, name, unitId, and unitName for all intermediate
# exchanges
# Added: CPC code (AJ)
try:
cpc = [o.classification[i].classificationValue for i in
range(len(o.classification)) if
o.classification[i].classificationSystem == 'CPC'][0]
except IndexError:
cpc = ''
return {'productName': o.name.text,
'unitName': o.unitName.text,
'productId': o.get('id'),
'unitId': o.get('unitId'),
'CPCCode': str(cpc)}
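# Illustrative record returned by extract_metadata() for one intermediate
# exchange (values below are made up; only the keys follow the dict above):
#   {'productName': 'electricity, high voltage',
#    'unitName': 'kWh',
#    'productId': '<uuid of the product>',
#    'unitId': '<uuid of the unit>',
#    'CPCCode': '17100: Electrical energy'}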
# Parse XML file
with open(fp, 'r', encoding="utf-8") as fh:
root = objectify.parse(fh).getroot()
pro_list = [extract_metadata(ds) for ds in root.iterchildren()]
# Convert this list into a dataFrame
self.products = pd.DataFrame(pro_list)
self.products.index = self.products['productId']
# Log event
sha1 = self.__hash_file(fp)
msg = "Products extracted from {} with SHA-1 of {}"
self.log.info(msg.format(self.__INTERMEXCHANGE, sha1))
def extract_activities(self):
""" Parses ACTIVITYINDEX file to extract core data on activities:
Id's, activity type, startDate, endDate
Args: None
----
Returns: None
--------
Generates: self.activities
---------
"""
# Parse XML file describing activities
activity_file = os.path.join(self.sys_dir,
'MasterData',
self.__ACTIVITYINDEX)
root = ET.parse(activity_file).getroot()
# Get list of activities and their core attributes
act_list = []
for act in root:
act_list.append([act.attrib['id'],
act.attrib['activityNameId'],
act.attrib['specialActivityType'],
act.attrib['startDate'],
act.attrib['endDate']])
# Remove any potential duplicates
act_list, _, _, _ = self.__deduplicate(act_list, 0, 'activity_list')
# Convert to dataFrame
self.activities = pd.DataFrame(act_list,
columns=('activityId',
'activityNameId',
'activityType',
'startDate',
'endDate'),
index=[row[0] for row in act_list])
self.activities['activityType'
] = self.activities['activityType'].astype(int)
# Log event
sha1 = self.__hash_file(activity_file)
msg = "{} extracted from {} with SHA-1 of {}"
self.log.info(msg.format('Activities', self.__ACTIVITYINDEX, sha1))
def extract_flows(self):
""" Extracts of all intermediate and elementary flows
Args: None
----
Returns: None
-------
Generates:
----------
self.inflows: normalized product (intermediate) inputs
self.elementary_flows: normalized elementary flows
self.outflows: normalized product (intermediate) outputs
"""
# Initialize empty lists
inflow_list = []
outflow_list = []
elementary_flows = []
product_price_list = []
# Get list of ecoSpold files to process
data_folder = os.path.join(self.sys_dir, 'datasets')
spold_files = glob.glob(os.path.join(data_folder, '*.spold'))
# Log event
self.log.info('Processing {} files in {}'.format(len(spold_files),
data_folder))
# ONE FILE AT A TIME
for sfile in spold_files:
# Get activityId from file name
current_file = os.path.basename(sfile)
current_id = os.path.splitext(current_file)[0]
# For each file, find flow data
root = etree.parse(sfile).getroot()
child_ds = root.find(self.__PRE + 'childActivityDataset')
if child_ds is None:
child_ds = root.find(self.__PRE + 'activityDataset')
flow_ds = child_ds.find(self.__PRE + 'flowData')
# GO THROUGH EACH FLOW IN TURN
for entry in flow_ds:
# Get magnitude of flow
try:
_amount = float(entry.attrib.get('amount'))
except (TypeError, ValueError):
# Get ID of failed amount
_fail_id = entry.attrib.get('elementaryExchangeId',
'not found')
if _fail_id == 'not found':
_fail_id = entry.attrib.get('intermediateExchangeId',
'not found')
# Log failure
self.log.warn("Parser warning: flow in {0} cannot be"
" converted' 'to float. Id: {1} - amount:"
" {2}".format(str(current_file),
_fail_id,
_amount))
continue
if _amount == 0: # Ignore entries of magnitude zero
continue
# GET OBJECT, DESTINATION AND/OR ORIGIN OF EACH FLOW
# ... for elementary flows
if entry.tag == self.__PRE + 'elementaryExchange':
elementary_flows.append([
current_id,
entry.attrib.get('elementaryExchangeId'),
_amount])
elif entry.tag == self.__PRE + 'intermediateExchange':
# ... or product use
if entry.find(self.__PRE + 'inputGroup') is not None:
inflow_list.append([
current_id,
entry.attrib.get('activityLinkId'),
entry.attrib.get('intermediateExchangeId'),
_amount])
# ... or product supply.
elif entry.find(self.__PRE + 'outputGroup') is not None:
outflow_list.append([
current_id,
entry.attrib.get('intermediateExchangeId'),
_amount,
entry.attrib.get('productionVolumeAmount'),
entry.find(self.__PRE + 'outputGroup').text])
# ... if it is an output, get the price info of the primary product
product_property_path = self.__PRE+'property[@propertyId ="38f94dd1-d5aa-41b8-b182-c0c42985d9dc"]'
if entry.findall(product_property_path):
for elem in entry.findall(product_property_path):
price = elem.attrib.get('amount')
for nextlevelelem in elem:
if nextlevelelem.tag == self.__PRE+'name':
name = nextlevelelem.text
elif nextlevelelem.tag == self.__PRE+'unitName':
unit = nextlevelelem.text
#print(nextlevelelem.tag,': ', nextlevelelem.text)
product_price_list.append([current_id,
entry.attrib.get('intermediateExchangeId'),
name, price, unit,
entry.find(self.__PRE + 'outputGroup').text])
#print(current_id,entry.attrib.get('intermediateExchangeId'),name, price, unit, entry.find(self.__PRE + 'outputGroup').text])
# Check for duplicates in outputflows
# there should really only be one output flow per activity
outflow_list, _, _, _ = self.__deduplicate(outflow_list,
0,
'outflow_list')
# CONVERT TO DATAFRAMES
self.inflows = pd.DataFrame(inflow_list, columns=['fileId',
'sourceActivityId',
'productId',
'amount'])
self.elementary_flows = pd.DataFrame(elementary_flows,
columns=['fileId',
'elementaryExchangeId',
'amount'])
out = pd.DataFrame(outflow_list,
columns=['fileId',
'productId',
'amount',
'productionVolume',
'outputGroup'],
index=[row[0] for row in outflow_list])
out['productionVolume'] = out['productionVolume'].astype(float)
out['outputGroup'] = out['outputGroup'].astype(int)
self.outflows = out
prices = pd.DataFrame(product_price_list,
columns = ['fileId',
'productId',
'name',
'amount',
'unit',
'outputGroup'],
index=[row[0] for row in product_price_list])
prices['amount'] = prices['amount'].astype(float)
prices['outputGroup'] = prices['outputGroup'].astype(int)
self.prices = prices
def build_STR(self):
""" Parses ElementaryExchanges.xml to builds stressor labels
Args: None
----
Behaviour influenced by:
------------------------
* self.STR_order: Determines how labels are ordered
Returns: None
-------
Generates: self.STR: DataFrame with stressor Id's for index
Credit:
-------
This function incorporates/adapts code from Brightway2data, that is,
the classmethod extract_biosphere_metadata from Ecospold2DataExtractor
"""
# File to parse
fp = os.path.join(self.sys_dir, 'MasterData', self.__ELEXCHANGE)
assert os.path.exists(fp), "Can't find ElementaryExchanges.xml"
def extract_metadata(o):
""" Subfunction to extract data from lxml root object """
return {
'id': o.get('id'),
'name': o.name.text,
'unit': o.unitName.text,
'cas': o.get('casNumber'),
'comp': o.compartment.compartment.text,
'subcomp': o.compartment.subcompartment.text
}
# Extract data from file
with open(fp, 'r', encoding="utf-8") as fh:
root = objectify.parse(fh).getroot()
self.STR = _df([extract_metadata(i) for i in root.iterchildren()])
# organize in pandas DataFrame
self.STR.index = self.STR['id']
self.STR = self.STR.reindex_axis(['id',
'name',
'unit',
'cas',
'comp',
'subcomp'], axis=1)
self.STR = self.STR.sort_values(by=self.STR_order)
# Log event
sha1 = self.__hash_file(fp)
msg = "{} extracted from {} with SHA-1 of {}"
self.log.info(msg.format('Elementary flows', self.__ELEXCHANGE, sha1))
def build_PRO(self):
""" Builds minimalistic intermediate exchange process labels
This function parses all files in the dataset folder. The result is
stored as a pandas DataFrame (self.PRO) whose index is the filename
of the files in the DATASET folder.
Args: None
----
Behaviour influenced by:
------------------------
* self.PRO_order: Determines how labels are ordered
Returns: None
-------
Generates: self.PRO: DataFrame with file_Id's for index
----------
"""
# INITIALIZE
# ----------
# Use ecospold filenames as indexes (they combine activity Id and
# reference-product Id)
data_folder = os.path.join(self.sys_dir, 'datasets')
spold_files = glob.glob(os.path.join(data_folder, '*.spold'))
_in = [os.path.splitext(os.path.basename(fn))[0] for fn in spold_files]
# Initialize empty DataFrame
PRO = pd.DataFrame(index=_in, columns=('activityId',
'productId',
'activityName',
'ISIC',
'EcoSpoldCategory',
'geography',
'technologyLevel',
'macroEconomicScenario'))
# LOOP THROUGH ALL FILES TO EXTRACT ADDITIONAL DATA
# -------------------------------------------------
# Log event
if len(spold_files) > 1000:
msg_many_files = 'Processing {} files - this may take a while ...'
self.log.info(msg_many_files.format(len(spold_files)))
#print('One step further')
for sfile in spold_files:
#print(sfile)
# Remove filename extension
file_index = os.path.splitext(os.path.basename(sfile))[0]
#print(file_index)
# Parse xml tree
root = ET.parse(sfile).getroot()
# Record product Id
if self.unlinked:
#objectify is a very handy way to parse an xml tree
#into a python object which is more easily accessible
rroot = objectify.parse(sfile).getroot()
if hasattr(rroot, "activityDataset"):
stem = rroot.activityDataset
else:
stem = rroot.childActivityDataset
#loop through the intermediate exchanges to find the ref. flow
#might be a better/smarter way but don't know it yet
for flow in stem.flowData.intermediateExchange:
if hasattr(flow, 'outputGroup'):
if flow.outputGroup == 0:
PRO.loc[file_index, 'productId'] = flow.attrib[
'intermediateExchangeId']
#For the unlinked data the file name does not
#feature the _productID anymore, so loop through the
#flow data to find the reference flow.
break #An activity has only one reference flow by
#construction so break out of loop after we found it
del rroot
else:
PRO.ix[file_index, 'productId'] = file_index.split('_')[1]
#if the data are linked, the product name is in the spold files
#print(file_index, sfile)
# Find activity dataset
child_ds = root.find(self.__PRE + 'childActivityDataset')
if child_ds is None:
child_ds = root.find(self.__PRE + 'activityDataset')
activity_ds = child_ds.find(self.__PRE + 'activityDescription')
# Loop through activity dataset
for entry in activity_ds:
# Get name, id, etc
if entry.tag == self.__PRE + 'activity':
PRO.ix[file_index, 'activityId'] = entry.attrib['id']
PRO.ix[file_index, 'activityName'] = entry.find(
self.__PRE + 'activityName').text
continue
# Get classification codes
if entry.tag == self.__PRE + 'classification':
if 'ISIC' in entry.find(self.__PRE +
'classificationSystem').text:
PRO.ix[file_index, 'ISIC'] = entry.find(
self.__PRE + 'classificationValue').text
if 'EcoSpold' in entry.find(
self.__PRE + 'classificationSystem').text:
PRO.ix[file_index, 'EcoSpoldCategory'
] = entry.find(self.__PRE +
'classificationValue').text
continue
# Get geography
if entry.tag == self.__PRE + 'geography':
PRO.ix[file_index, 'geography'
] = entry.find(self.__PRE + 'shortname').text
continue
# Get Technology
try:
if entry.tag == self.__PRE + 'technology':
PRO.ix[file_index, 'technologyLevel'
] = entry.attrib['technologyLevel']
continue
except:
# Apparently it is not a mandatory field in ecospold2.
# Skip if absent
pass
# Find MacroEconomic scenario
if entry.tag == self.__PRE + 'macroEconomicScenario':
PRO.ix[file_index, 'macroEconomicScenario'
] = entry.find(self.__PRE + 'name').text
continue
# quality check of id and index
if file_index.split('_')[0] != PRO.ix[file_index, 'activityId']:
self.log.warn('Index based on file {} and activityId in the'
' xml data are different'.format(str(sfile)))
# Final touches and save to self
PRO['technologyLevel'] = PRO['technologyLevel'].fillna(0).astype(int)
for i in self.__TechnologyLevels.index:
bo = PRO['technologyLevel'] == i
PRO.ix[bo, 'technologyLevel'] = self.__TechnologyLevels[i]
self.PRO = PRO.sort_values(by=self.PRO_order)
def extract_old_labels(self, old_dir, sep='|'):
""" Read in old PRO, STR and IMP labels csv-files from directory
self.STR_old must be defined with:
* with THREE name columns called name, name2, name3
* cas, comp, subcomp, unit
* ardaid, i.e., the Id that we wish to re-use in the new dataset
"""
# Read in STR
path = glob.glob(os.path.join(old_dir, '*STR*.csv'))[0]
self.STR_old = pd.read_csv(path, sep=sep)
# Read in PRO
path = glob.glob(os.path.join(old_dir, '*PRO*.csv'))[0]
self.PRO_old = pd.read_csv(path, sep=sep)
# Read in IMP
path = glob.glob(os.path.join(old_dir, '*IMP*.csv'))[0]
self.IMP_old = pd.read_csv(path, sep=sep)
# =========================================================================
# CLEAN UP FUNCTIONS: if imperfections in ecospold data
def __find_unsourced_flows(self):
"""
find input/use flows that do not have a specific supplying activity.
It determines the traceable or untraceable character of a product flow
based on the sourceActivityId field.
Depends on:
----------
* self.inflows (from self.get_flows or self.extract_flows)
* self.products [optional] (from self.extract_products)
* self.PRO [optional] (from self.get_labels or self.build_PRO)
Generates:
----------
* self.unsourced_flows: dataFrame w/ descriptions of unsourced flows
Args: None
----
Returns: None
--------
"""
# Define boolean vector of product flows without source
nuns = np.equal(self.inflows['sourceActivityId'].values, None)
unsourced_flows = self.inflows[nuns]
# add potentially useful information from other tables
# (not too crucial)
try:
unsourced_flows = self.inflows[nuns].reset_index().merge(
self.PRO[['activityName', 'geography', 'activityType']],
left_on='fileId', right_index=True)
unsourced_flows = unsourced_flows.merge(
self.products[['productName', 'productId']],
on='productId')
# ... and remove useless information
unsourced_flows.drop(['sourceActivityId'], axis=1, inplace=True)
except:
pass
# Log event and save to self
if np.any(nuns):
self.log.warn('Found {} untraceable flows'.format(np.sum(nuns)))
self.unsourced_flows = unsourced_flows
else:
self.log.info('OK. No untraceable flows.')
def __fix_flow_sources(self):
""" Try to find source activity for every product input-flow
Dependencies:
-------------
* self.unsourced_flows (from self.__find_unsourced_flows)
* self.PRO (from self.get_labels or self.build_PRO)
* self.inflows (from self.get_flows or self.extract_flows)
Potentially modifies all three dependencies above to assign unambiguous
source for each product flow, following these rules:
1) If only one provider in dataset, pick this one, even if geography is
wrong
2) Else if only one producer and one market, pick the market (as the
producer clearly sells to the sole market), regardless of geography
3) Else if there is one market in the right geography, always prefer
that to any other.
4) Else if there is only one producer with the right geography, prefer
that one.
5) Otherwise, no unambiguous answer, leave for user to fix manually
"""
# Proceed to find clear sources for these flows, if at all possible
for i in self.unsourced_flows.index:
aflow = self.unsourced_flows.loc[i]
# Boolean vectors for relating the flow under investigation with
# PRO, either in terms of which industries require the product
# in question (boPro), the geographical location of the flow
# (boGeo), whether the source activity is a market or an
# ordinary activity (boMark), or a combination of the above
boPro = (self.PRO.productId == aflow.productId).values
boGeo = (self.PRO.geography == aflow.geography).values
boMark = (self.PRO.activityType == '1').values
boMarkGeo = np.logical_and(boGeo, boMark)
boProGeo = np.logical_and(boPro, boGeo)
boProMark = np.logical_and(boPro, boMark)
act_id = '' # goal: finding a plausible value for this variable
# If it is a "normal" activity that has this input flow
if aflow.activityType == '0':
# Maybe there are NO producers, period.
if sum(boPro) == 0:
msg_noprod = "No producer found for product {}! Not good."
self.log.error(msg_noprod.format(aflow.productId))
self.log.warning("Creation of dummy producer not yet"
" automated")
# Maybe there is no choice, only ONE producer
elif sum(boPro) == 1:
act_id = self.PRO[boPro].activityId.values
# Log event
if any(boProGeo):
# and all is fine geographically for this one producer
self.log.warn("Exactly 1 producer ({}) for product {}"
", and its geography is ok for this"
" useflow".format(act_id,
aflow.productId))
else:
# but has wrong geography... geog proxy
wrong_geo = self.PRO[boPro].geography.values
msg = ("Exactly 1 producer ({}) for product {}, used"
" in spite of having wrong geography for {}:{}")
self.log.warn(msg.format(act_id,
aflow.productId,
aflow.fileId,
wrong_geo))
# Or there is only ONE producer and ONE market, no choice
# either, since then it is clear that this producer sells to
# the market.
elif sum(boPro) == 2 and sum(boProMark) == 1:
act_id = self.PRO[boProMark].activityId.values
# Log event
self.log.warn = ("Exactly 1 producer and 1 market"
" worldwide, so we source product {} from"
" market {}".format(aflow.productId,
act_id))
# or there are multiple sources, but only one market with the
# right geography
elif sum(boMarkGeo) == 1:
act_id = self.PRO[boMarkGeo].activityId.values
# Log event
msg_markGeo = ('Multiple sources, but only one market ({})'
' with right geography ({}) for product {}'
' use by {}.')
self.log.warn(msg_markGeo.format(act_id,
aflow.geography,
aflow.productId,
aflow.fileId))
# Or there are multiple sources, but only one producer with the
# right geography.
elif sum(boProGeo) == 1:
act_id = self.PRO[boProGeo].activityId.values
# Log event
msg_markGeo = ('Multiple sources, but only one producer'
' ({}) with right geography ({}) for'
' product {} use by {}.')
self.log.warn(msg_markGeo.format(act_id,
aflow.geography,
aflow.productId,
aflow.fileId))
else:
# No unambiguous fix, save options to file, let user decide
filename = ('potentialSources_' + aflow.productId +
'_' + aflow.fileId + '.csv')
debug_file = os.path.join(self.log_dir, filename)
# Log event
msg = ("No unambiguous fix. {} potential sources for "
"product {} use by {}. Will need manual fix, see"
" {}.")
self.log.error(msg.format(sum(boPro),
aflow.productId,
aflow.fileId,
debug_file))
self.PRO.ix[boPro, :].to_csv(debug_file, sep='|',
encoding='utf-8')
# Based on choice of act_id, record the selected source
# activity in inflows
self.inflows.ix[aflow['index'], 'sourceActivityId'] = act_id
elif aflow.activityType == '1':
msg = ("A market with untraceable inputs:{}. This is not yet"
" supported by __fix_flow_sources.")
self.log.error(msg.format(aflow.fileId)) # do something!
else:
msg = ("Activity {} is neither a normal one nor a market. Do"
" not know what to do with its" " untraceable flow of"
" product {}").format(aflow.fileId, aflow.productId)
self.log.error(msg) # do something!
def __fix_missing_activities(self):
""" Fix if flow sourced explicitly to an activity that does not exist
Identifies existence of missing production, and generate them
Depends on or Modifies:
-----------------------
* self.inflows (from self.get_flows or self.extract_flows)
* self.outflows (from self.get_flows or self.extract_flows)
* self.products (from self.extract_products)
* self.PRO (from self.get_labels or self.build_PRO)
Generates:
----------
self.missing_activities
"""
# Get set of all producer-product pairs in inflows
flows = self.inflows[['sourceActivityId', 'productId']].values.tolist()
set_flows = set([tuple(i) for i in flows])
# Set of all producer-product pairs in PRO
processes = self.PRO[['activityId', 'productId']].values.tolist()
set_labels = set([tuple(i) for i in processes])
# Identify discrepancies: missing producer-product pairs in labels
missing_activities = set_flows - set_labels
if missing_activities:
# Complain
msg = ("Found {} product flows traceable to sources that do not"
" produce right product. Productions will have to be added"
" to PRO, which now contain {} productions. Please see"
" missingProducers.csv.")
self.log.error(msg.format(len(missing_activities), len(self.PRO)))
# Organize in dataframe
miss = pd.DataFrame([[i[0], i[1]] for i in missing_activities],
columns=['activityId', 'productId'],
index=[i[0] + '_' + i[1]
for i in missing_activities])
activity_cols = ['activityId', 'activityName', 'ISIC']
product_cols = ['productId', 'productName']
copied_cols = activity_cols + product_cols
copied_cols.remove('productName')
# Merge to get info on missing flows
miss = miss.reset_index()
miss = miss.merge(self.PRO[activity_cols],
how='left',
on='activityId')
miss = miss.merge(self.products[product_cols],
how='left', on='productId')
miss = miss.set_index('index')
# Save missing flows to file for inspection
miss.to_csv(os.path.join(self.log_dir, 'missingProducers.csv'),
sep='|', encoding='utf-8')
# Insert dummy productions
for i, row in miss.iterrows():
self.log.warn('New dummy activity: {}'.format(i))
# add row to self.PRO
self.PRO.ix[i, copied_cols] = row[copied_cols]
self.PRO.ix[i, 'comment'] = 'DUMMY PRODUCTION'
# add new row in outputflow
self.outflows.ix[i, ['fileId', 'productId', 'amount']
] = [i, row['productId'], 1.0]
self.log.warn("Added dummy productions to PRO, which"
" is now {} processes long.".format(len(self.PRO)))
self.missing_activities = missing_activities
else:
self.log.info("OK. Source activities seem in order. Each product"
" traceable to an activity that actually does"
" produce or distribute this product.")
# =========================================================================
# ASSEMBLE MATRICES: now all parsed and cleanead, build the final matrices
def complement_labels(self):
""" Add extra data from self.products and self.activities to labels
Until complement_labels is run, labels are kept to the strict minimum
to facilitate tinkering with them if needed to fix discrepancies in the
database. For example, adding a missing activity or creating a dummy
process in labels is easier without all the (optional) extra meta-data
in there.
Once we have a consistent symmetric system, it's time to add useful
information to the matrix row and column labels for human readability.
Depends on:
-----------
self.products (from self.extract_products)
self.activities (from self.extract_activities)
Modifies:
---------
self.PRO (from self.get_labels or self.build_PRO)
self.STR (from self.get_labels or self.build_STR)
"""
self.PRO = self.PRO.reset_index()
# add data from self.products
self.PRO = self.PRO.merge(self.products,
how='left',
on='productId')
# add data from self.activities
self.PRO = self.PRO.merge(self.activities,
how='left',
on='activityId')
# Final touches and re-establish indexes as before
self.PRO = self.PRO.drop('unitId', axis=1).set_index('index')
# Re-sort processes (in fix-methods altered order/inserted rows)
self.PRO = self.PRO.sort_values(by=self.PRO_order)
self.STR = self.STR.sort_values(by=self.STR_order)
def build_AF(self):
"""
Arranges flows as Leontief technical coefficient matrix + extensions
Dependencies:
-------------
* self.inflows (from get_flows or extract_flows)
* self.elementary_flows (from get_flows or extract_flows)
* self.outflows (from get_flows or extract_flows)
* self.PRO [final version] (completed by self.complement_labels)
* self.STR [final version] (completed by self.complement_labels)
Behaviour determined by:
-----------------------
* self.positive_waste (determines sign convention to use)
* self.nan2null
Generates:
----------
* self.A
* self.F
"""
# By pivot tables, arrange all intermediate and elementary flows as
# matrices
self.log.info("Starting to assemble the matrices")
self.A = pd.pivot(
self.inflows['sourceActivityId'] + '_' + self.inflows['productId'],
self.inflows['fileId'],
self.inflows['amount']
).reindex(index=self.PRO.index, columns=self.PRO.index)
self.F = pd.pivot_table(self.elementary_flows,
values='amount',
index='elementaryExchangeId',
columns='fileId').reindex(index=self.STR.index,
columns=self.PRO.index)
# Take care of sign convention for waste
self.log.info("Starting normalizing matrices")
if self.positive_waste:
sign_changer = self.outflows['amount'] / self.outflows['amount'].abs()
self.A = self.A.mul(sign_changer, axis=0)
col_normalizer = 1 / self.outflows['amount'].abs()
else:
col_normalizer = 1 / self.outflows['amount']
# Normalize flows
# Reorder all rows and columns to fit labels
self.A = self.A.mul(col_normalizer, axis=1).reindex(
index=self.PRO.index,
columns=self.PRO.index)
self.F = self.F.mul(col_normalizer, axis=1).reindex(
index=self.STR.index,
columns=self.PRO.index)
self.log.info("fillna")
if self.nan2null:
self.A.fillna(0, inplace=True)
self.F.fillna(0, inplace=True)
def scale_up_AF(self):
""" Calculate absolute flow matrix from A, F, and production Volumes
In other words, scales up normalized system description to reach
recorded production volumes
Dependencies:
--------------
* self.outflows
* self.A
* self.F
Generates:
----------
* self.Z
* self.G_pro
"""
q = self.outflows['productionVolume']
self.Z = self.A.multiply(q, axis=1).reindex_like(self.A)
self.G_pro = self.F.multiply(q, axis=1).reindex_like(self.F)
def remove_Markets(self):
""" If the data are unlinked, markets are empty, that is nothing is
linked to the market yet, move the use flows (inflows, e.g. transport,
losses, etc) from the markets to use flows in the activities using
the reference product of the market.
"""
'''
#first check if the data is actually considered to be unlinked.
#If not, warn the user and log. Potentially ask for user input
if self.unlinked is False:
var = input("Data are treated as linked and allocated, are you\n\
sure you want to remove the markets? [y/n]: ")
while var not in ['y','n']:
var = input("Invalid input! Please select a valid option!\n\
Data are treated as linked and allocated, are you\n\
sure you want to remove the markets? [y/n]: ")
if var == 'n':
self.log.info('Not removing markets, choice made through user input.')
return 0
#If the code gets here, var must be 'y':
self.log.warning("Remove markets: Data are linked and allocated,
removing markets nonetheless. Choice made through user input")
#This function does not do anything at the moment as we decided to use ocelot
#the linking/allocation.
'''
pass
def build_sut(self, make_untraceable=False):
""" Arranges flow data as Suply and Use Tables and extensions
Args:
-----
* make_untraceable: Whether or not to aggregate away the source
activity dimension, yielding a use table in which
products are no longer linked to their providers
[default: False; don't do it]
Dependencies:
-------------
* self.inflows
* self.outflows
* self.elementary_flows
Behaviour determined by:
-----------------------
* self.nan2null
Generates:
----------
* self.U
* self.V
* self.G_act
* self.V_prodVol
"""
def remove_productId_from_fileId(flows):
"""subfunction to remove the 'product_Id' part of 'fileId' data in
DataFrame, leaving only activityId and renaming the column as such"""
fls = flows.replace('_[^_]*$', '', regex=True)
fls.rename(columns={'fileId': 'activityId'}, inplace=True)
return fls
# Refocus on activity rather than process (activityId vs fileId)
outfls = remove_productId_from_fileId(self.outflows)
elfls = remove_productId_from_fileId(self.elementary_flows)
infls = remove_productId_from_fileId(self.inflows)
infls.replace(to_replace=[None], value='', inplace=True)
# Pivot flows into Use and Supply and extension tables
self.U = pd.pivot_table(infls,
index=['sourceActivityId', 'productId'],
columns='activityId',
values='amount',
aggfunc=np.sum)
self.V = pd.pivot_table(outfls,
index='productId',
columns='activityId',
values='amount',
aggfunc=np.sum)
self.V_prodVol = pd.pivot_table(outfls,
index='productId',
columns='activityId',
values='productionVolume',
aggfunc=np.sum)
self.G_act = pd.pivot_table(elfls,
index='elementaryExchangeId',
columns='activityId',
values='amount',
aggfunc=np.sum)
# ensure all products are covered in supply table
self.V = self.V.reindex(index=self.products.index,
columns=self.activities.index)
self.V_prodVol = self.V_prodVol.reindex(index=self.products.index,
columns=self.activities.index)
self.U = self.U.reindex(columns=self.activities.index)
self.G_act = self.G_act.reindex(index=self.STR.index,
columns=self.activities.index)
if make_untraceable:
# Optionally aggregate away sourceActivity dimension, more IO-like
# Supply and untraceable-Use tables...
self.U = self.U.groupby(level='productId').sum()
self.U = self.U.reindex(index=self.products.index,
columns=self.activities.index)
self.log.info("Aggregated all sources in U, made untraceable")
if self.nan2null:
self.U.fillna(0, inplace=True)
self.V.fillna(0, inplace=True)
self.G_act.fillna(0, inplace=True)
# =========================================================================
# SANITY CHECK: Compare calculated cummulative LCI with official values
def build_E(self, data_folder=None):
""" Extract matrix of cummulative LCI from ecospold files
Dependencies:
------------
* self.PRO
* self.STR
Behaviour influenced by:
------------------------
* self.lci_dir
* self.nan2null
Generates:
----------
* self.E
"""
# Get list of ecospold files
if data_folder is None:
data_folder = self.lci_dir
spold_files = glob.glob(os.path.join(data_folder, '*.spold'))
if len(spold_files):
self.log.info( "Processing {} {} files from {}".format(
len(spold_files), 'cummulative LCI', data_folder))
else:
self.log.warning(
"Did not find any ecospold file in {}".format(data_folder))
# Initialize (huge) dataframe and get dimensions
self.log.info('creating E dataframe')
self.E = pd.DataFrame(index=self.STR.index,
columns=self.PRO.index, dtype=float)
initial_rows, initial_columns = self.E.shape
# LOOP OVER ALL FILES TO EXTRACT ELEMENTARY FLOWS
for count, sfile in enumerate(spold_files):
# Get to flow data
current_file = os.path.basename(sfile)
current_id = os.path.splitext(current_file)[0]
root = ET.parse(sfile).getroot()
child_ds = root.find(self.__PRE + 'childActivityDataset')
if child_ds is None:
child_ds = root.find(self.__PRE + 'activityDataset')
flow_ds = child_ds.find(self.__PRE + 'flowData')
# Find elementary exchanges amongst all flows
for entry in flow_ds:
if entry.tag == self.__PRE + 'elementaryExchange':
try:
# Get amount
self.E.ix[entry.attrib['elementaryExchangeId'],
current_id] = float(entry.attrib['amount'])
except:
_amount = entry.attrib.get('amount', 'not found')
if _amount != '0':
msg = ("Parser warning: elementary exchange in {0}"
". elementaryExchangeId: {1} - amount: {2}")
self.log.warn(msg.format(str(current_file),
entry.attrib.get('elementaryExchangeId',
'not found'), _amount))
# keep user posted, as this loop can be quite long
if count % 300 == 0:
self.log.info('Completed {} files.'.format(count))
# Check for discrepancies in list of stressors and processes
final_rows, final_columns = self.E.shape
appended_rows = final_rows - initial_rows
appended_columns = final_columns - initial_columns
# and log
if appended_rows != 0:
    self.log.warn('There are {} new stressors (rows) relative to the'
                  ' initial list.'.format(str(appended_rows)))
if appended_columns != 0:
    self.log.warn('There are {} new processes (columns) relative to'
                  ' the initial list.'.format(str(appended_columns)))
if self.nan2null:
self.E.fillna(0, inplace=True)
def __calculate_E(self, A0, F0):
""" Calculate lifecycle cummulative inventories for comparison self.E
Args:
-----
* A0 : Leontief A-matrix (pandas dataframe)
* F0 : Environmental extension (pandas dataframe)
Returns:
--------
* Ec as pandas dataframe
Note:
--------
* Plan to move this as nested function of cummulative_lci_check
"""
A = A0.fillna(0).values
F = F0.fillna(0).values
I = np.eye(len(A))
Ec = F.dot(np.linalg.solve(I - A, I))
return pd.DataFrame(Ec, index=F0.index, columns=A0.columns)
def cummulative_lci_check(self, rtol=5e-2, atol=1e-5, imax=3):
"""
Sanity check: compares calculated and parsed cummulative LCI data
Args:
-----
* rtol: relative tolerance, maximum relative difference between
coefficients
* atol: absolute tolerance, maximum absolute difference between
coefficients
* imax: Number of orders of magnitude smaller than defined tolerance
that should be investigated
Depends on:
----------
* self.E
* self.__calculate_E()
"""
Ec = self.__calculate_E(self.A, self.F)
filename = os.path.join(self.log_dir,
'qualityCheckCummulativeLCI.shelf')
shelf = shelve.open(filename)
# Perform compareE() analysis at different tolerances, in steps of 10
i = 1
while (i <= imax) and (rtol > self.rtolmin):
bad = self.compareE(Ec, rtol, atol)
rtol /= 10
if bad is not None:
# Save bad flows in Shelf persistent dictionary
shelf['bad_at_rtol'+'{:1.0e}'.format(rtol)] = bad
i += 1
shelf.close()
sha1 = self.__hash_file(filename)
msg = "{} saved in {} with SHA-1 of {}"
self.log.info(msg.format('Cummulative LCI differences',
filename,
sha1))
def compareE(self, Ec, rtol=5e-2, atol=1e-5):
""" Compare parsed (official) cummulative lci emissions (self.E) with
lifecycle emissions calculated from the constructed matrices (Ec)
"""
thebad = None
# Compare the two matrices, see how many values are "close"
close = np.isclose(abs(self.E.fillna(0.0)), abs(Ec.fillna(0.0)), rtol, atol)
notclose = np.sum(~ close)
allcomp = np.sum(close) + notclose
self.log.info('There are {} lifecycle cummulative emissions out of {} that '
'differ by more than {} % AND by more than {} units '
'relative to the official value.'.format(notclose,
allcomp,
rtol*100,
atol))
if notclose:
# Compile a Series of all "not-close" values
thebad = pd.concat([self.E.mask(close).stack(), Ec.mask(close).stack()], 1)
del(Ec)
thebad.columns = ['official', 'calculated']
thebad.index.names = ['stressId', 'fileId']
# Merge labels to make it human readable
thebad = pd.merge(thebad.reset_index(),
self.PRO[['productName', 'activityName']],
left_on='fileId',
right_index=True)
thebad = pd.merge(thebad,
self.STR,
left_on='stressId',
right_index=True).set_index(['stressId',
'fileId'])
return thebad
# =========================================================================
# EXPORT AND HELPER FUNCTIONS
def save_system(self, file_formats=None):
""" Save normalized syste matrices to different formats
Args:
-----
* file_formats : List of file formats in which to save data
[Default: None, save to all possible file formats]
Options: 'Pandas' --> pandas dataframes
'csv' --> text with separator = '|'
'SparsePandas' --> sparse pandas dataframes
'SparseMatrix' --> scipy AND matlab sparse
'SparseMatrixForArda' --> with special
background
variable names
This method creates separate files for normalized, symmetric matrices
(A, F), scaled-up symmetric matrices (Z, G_pro), and supply and use
data (U, V, V_prodVol, G_act).
For Pandas and sparse pickled files, this method organizes the
variables in a dictionary, and pickles this dictionary to file.
For sparse pickled files, some labels are not turned into sparse
matrices (because they are not sparse) and are instead added as numpy
arrays.
For Matlab sparse matrices, variables are saved to a .mat file.
For CSV, a subdirectory is created to hold one text file per variable.
Returns:
-------
None
"""
# TODO: include characterisation factors in all formats and also
# non-normalized
def pickling(filename, adict, what_it_is, mat):
""" subfunction that handles creation of binary files """
# save dictionary as pickle
ext = '.gz.pickle'
with gzip.open(filename + ext, 'wb') as fout:
pickle.dump(adict, fout)
sha1 = self.__hash_file(filename + ext)
msg = "{} saved in {} with SHA-1 of {}"
self.log.info(msg.format(what_it_is, filename + ext, sha1))
# save dictionary also as mat file
if mat:
scipy.io.savemat(filename, adict, do_compression=True)
sha1 = self.__hash_file(filename + '.mat')
msg = "{} saved in {} with SHA-1 of {}"
self.log.info(msg.format(what_it_is, filename + '.mat', sha1))
def pickle_symm_norm(PRO=None, STR=None, IMP=None, A=None, F=None,
C=None, PRO_header=None, STR_header=None, IMP_header=None,
mat=False, for_arda_background=False):
""" nested function that prepares dictionary for symmetric,
normalized (coefficient) system description file """
if not for_arda_background:
adict = {'PRO': PRO,
'STR': STR,
'IMP': IMP,
'A': A,
'F': F,
'C': C,
'PRO_header': PRO_header,
'STR_header': STR_header,
'IMP_header': IMP_header
}
else:
adict = {'PRO_gen': PRO,
'STR': STR,
'IMP': IMP,
'A_gen': A,
'F_gen': F,
'C': C,
'PRO_header': PRO_header,
'STR_header': STR_header,
'IMP_header': IMP_header
}
self.log.info("about to write to file")
pickling(file_pr + '_symmNorm', adict,
'Final, symmetric, normalized matrices', mat)
def pickle_symm_scaled(PRO, STR, Z, G_pro, mat=False):
""" nested function that prepares dictionary for symmetric,
scaled (flow) system description file """
adict = {'PRO': PRO,
'STR': STR,
'Z': Z,
'G_pro': G_pro}
pickling(file_pr + '_symmScale', adict,
'Final, symmetric, scaled-up flow matrices', mat)
def pickle_sut(prod, act, STR, U, V, V_prodVol, G_act, mat=False):
""" nested function that prepares dictionary for SUT file """
adict = {'products': prod,
'activities': act,
'STR': STR,
'U': U,
'V': V,
'V_prodVol': V_prodVol,
'G_act': G_act}
pickling(file_pr + '_SUT', adict, 'Final SUT matrices', mat)
self.log.info("Starting to export to file")
# save as full Dataframes
format_name = 'Pandas'
if file_formats is None or format_name in file_formats:
file_pr = os.path.join(self.out_dir,
self.project_name + format_name)
if self.A is not None:
pickle_symm_norm(PRO=self.PRO,
STR=self.STR,
IMP=self.IMP,
A=self.A,
F=self.F,
C=self.C)
if self.Z is not None:
pickle_symm_scaled(self.PRO, self.STR, self.Z, self.G_pro)
if self.U is not None:
pickle_sut(self.products,
self.activities,
self.STR,
self.U, self.V, self.V_prodVol, self.G_act)
# save sparse Dataframes
format_name = 'SparsePandas'
if file_formats is None or format_name in file_formats:
file_pr = os.path.join(self.out_dir,
self.project_name + format_name)
if self.A is not None:
pickle_symm_norm(PRO=self.PRO,
STR=self.STR,
IMP=self.IMP,
A=self.A.to_sparse(),
F=self.F.to_sparse(),
C=self.C.to_sparse())
if self.Z is not None:
Z = self.Z.to_sparse()
G_pro = self.G_pro.to_sparse()
pickle_symm_scaled(self.PRO, self.STR, Z, G_pro)
if self.U is not None:
U = self.U.to_sparse()
V = self.V.to_sparse()
V_prodVol = self.V_prodVol.to_sparse()
G_act = self.G_act.to_sparse()
pickle_sut(self.products,
self.activities,
self.STR,
U, V, V_prodVol, G_act)
# save as sparse Matrices (both pickled and mat-files)
format_name = 'SparseMatrix'
# Check if we need special formatting for ARDA software
for_arda_background=False
try:
if 'SparseMatrixForArda' in file_formats:
for_arda_background=True
except TypeError:
pass
# Proceed
if (file_formats is None
or format_name in file_formats
or for_arda_background):
file_pr = os.path.join(self.out_dir,
self.project_name + format_name)
PRO = self.PRO.fillna('').values
STR = self.STR.fillna('').values
IMP = self.IMP.fillna('').values
PRO_header = self.PRO.columns.values
PRO_header = PRO_header.reshape((1, -1))
STR_header = self.STR.columns.values
STR_header = STR_header.reshape((1, -1))
C = scipy.sparse.csc_matrix(self.C.fillna(0))
IMP_header = self.IMP.columns.values
IMP_header = IMP_header.reshape((1, -1))
if self.A is not None:
A = scipy.sparse.csc_matrix(self.A.fillna(0))
F = scipy.sparse.csc_matrix(self.F.fillna(0))
pickle_symm_norm(PRO=PRO, STR=STR, IMP=IMP, A=A, F=F, C=C,
PRO_header=PRO_header, STR_header=STR_header,
IMP_header=IMP_header, mat=True,
for_arda_background=for_arda_background)
if self.Z is not None:
Z = scipy.sparse.csc_matrix(self.Z.fillna(0))
G_pro = scipy.sparse.csc_matrix(self.G_pro.fillna(0))
pickle_symm_scaled(PRO, STR, Z, G_pro, mat=True)
if self.U is not None:
U = scipy.sparse.csc_matrix(self.U.fillna(0))
V = scipy.sparse.csc_matrix(self.V.fillna(0))
V_prodVol = scipy.sparse.csc_matrix(self.V_prodVol.fillna(0))
G_act = scipy.sparse.csc_matrix(self.G_act.fillna(0))
products = self.products.values # to numpy array, not sparse
activities = self.activities.values
pickle_sut(products,
activities,
STR,
U, V, V_prodVol, G_act, mat=True)
# Write to CSV files
format_name = 'csv'
if file_formats is None or format_name in file_formats:
csv_dir = os.path.join(self.out_dir, 'csv')
if not os.path.exists(csv_dir):
os.makedirs(csv_dir)
self.PRO.to_csv(os.path.join(csv_dir, 'PRO.csv'))
self.STR.to_csv(os.path.join(csv_dir, 'STR.csv'))
if self.C is not None:
self.C.to_csv(os.path.join(csv_dir, 'C.csv'), sep='|')
self.IMP.to_csv(os.path.join(csv_dir, 'IMP.csv'), sep='|')
if self.A is not None:
self.A.to_csv(os.path.join(csv_dir, 'A.csv'), sep='|')
self.F.to_csv(os.path.join(csv_dir, 'F.csv'), sep='|')
if self.Z is not None:
self.Z.to_csv(os.path.join(csv_dir, 'Z.csv'), sep='|')
self.G_pro.to_csv(os.path.join(csv_dir, 'G_pro.csv'), sep='|')
if self.U is not None:
self.products.to_csv(os.path.join(csv_dir, 'products.csv'),
sep='|')
self.prices.to_csv(os.path.join(csv_dir, 'prices.csv'),
sep='|')
self.activities.to_csv(os.path.join(csv_dir, 'activities.csv'),
sep='|')
self.U.to_csv(os.path.join(csv_dir, 'U.csv'), sep='|')
self.V.to_csv(os.path.join(csv_dir, 'V.csv'), sep='|')
self.V_prodVol.to_csv(os.path.join(csv_dir, 'V_prodVol.csv'),
sep='|')
self.G_act.to_csv(os.path.join(csv_dir, 'G_act.csv'), sep='|')
self.log.info("Final matrices saved as CSV files in " + csv_dir)
def __hash_file(self, afile):
""" Get SHA-1 hash of binary file
Args:
-----
* afile: either name of file or file-handle of a file opened in
"read-binary" ('rb') mode.
Returns:
--------
* hexdigest hash of file, as string
"""
blocksize = 65536
hasher = hashlib.sha1()
# Sometimes used for afile as filename
try:
f = open(afile, 'rb')
opened_here = True
# Or afile can be a filehandle
except:
f = afile
opened_here = False
buf = f.read(blocksize)
while len(buf) > 0:
hasher.update(buf)
buf = f.read(blocksize)
if opened_here:
f.close()
return hasher.hexdigest()
def __deduplicate(self, raw_list, idcol=None, name=''):
""" Removes duplicate entries from a list
and then optionally warns of duplicate id's in the cleaned up list
Args:
-----
raw_list: a list
idcol : in case of a list of lists, the "column" position for id's
name: string, just for logging messages
Return:
------
deduplicated: list without redundant entries
duplicates: list with redundant entries
id_deduplicated: list of ID's without redundancy
id_duplicates: list of redundant ID's
"""
# Initialization
deduplicated = []
duplicates = []
id_deduplicated = []
id_duplicates = []
# Find duplicate rows in list
for elem in raw_list:
if elem not in deduplicated:
deduplicated.append(elem)
else:
duplicates.append(elem)
# If an "index column" is specified, find duplicate indexes in
# deduplicated. In other words, amongst unique rows, are there
# non-unique IDs?
if idcol is not None:
indexes = [row[idcol] for row in deduplicated]
for index in indexes:
if index not in id_deduplicated:
id_deduplicated.append(index)
else:
id_duplicates.append(index)
# Log findings for further inspection
if duplicates:
filename = 'duplicate_' + name + '.csv'
with open(os.path.join(self.log_dir, filename), 'w') as fout:
duplicatewriter = csv.writer(fout, delimiter='|')
duplicatewriter.writerows(duplicates)
msg = 'Removed {} duplicate rows from {}, see {}.'
self.log.warn(msg.format(len(duplicates), name, filename))
if id_duplicates:
filename = 'duplicateID_' + name + '.csv'
with open(os.path.join(self.log_dir, filename), 'w') as fout:
duplicatewriter = csv.writer(fout, delimiter='|')
duplicatewriter.writerows(id_duplicates)
msg = 'There are {} duplicate Ids in {}, see {}.'
self.log.warn(msg.format(len(id_duplicates), name, filename))
return deduplicated, duplicates, id_deduplicated, id_duplicates
# =========================================================================
# Characterisation factors matching
# =========================================================================
def simple_characterisation_matching(self, characterisation_file):
# Useful stuff
c = self.conn.cursor()
non_decimal = re.compile(r'[^0-9]')
basename = os.path.basename
def clean_up_columns(df):
""" Remove spaces, whitespace, and periods from column names
Also, return a list of all numbers in columns
"""
df.columns = [x.strip().replace(' ', '_') for x in df.columns]
col_version_numbers = [non_decimal.sub('', x) for x in df.columns]
df.columns = [x.replace('.', '') for x in df.columns]
return df, col_version_numbers
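# Illustrative column cleaning (made-up header, not from the source):
#   ' CF 3.3 '  -> strip + replace spaces -> 'CF_3.3'
#               -> drop periods           -> 'CF_33'
#   non_decimal.sub('', 'CF_3.3') -> '33'  (the version string that is
#   later matched against the characterisation file name)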
# Read and clean units
units = pd.read_excel(characterisation_file, 'units')
units, __ = clean_up_columns(units)
# Read and clean characterisation factors
cf = pd.read_excel(characterisation_file, 'CFs')
cf, col_version_numbers = clean_up_columns(cf)
# Try to find column with the matching CF and filename version number
# (e.g. CF 3.3 for LCIA_Implementation_3.3.xlsx)
file_version = non_decimal.sub('', basename(characterisation_file))
try:
cf_col = col_version_numbers.index(file_version)
msg = "Will use column {}, named {}, for characterisation factors"
self.log.info(msg.format(cf_col, cf.columns[cf_col]))
except:
cf_col = -3
msg = ("Could not match file version {} with CF versions."
" By default will use {}.")
self.log.warning(msg.format(file_version, cf.columns[cf_col]))
# Rename characterisation factor column
cols = cf.columns.tolist()
cols[cf_col] = 'CF'
cf.columns = cols
sep = '; '
self.log.info("Starting characterisation matching")
# Export to sqlite for matching
cf.to_sql('char', self.conn, if_exists='replace')
units.to_sql('units', self.conn, if_exists='replace')
self.STR.to_sql('stressors',
self.conn,
index_label='stressorId',
if_exists='replace')
# Complement ecoinvent elementary flows (stressors s) with matching
# characterisation factors (char c) and its units (units u)
sql_cmd = """ SELECT s.name, s.comp, s.subcomp, s.unit, s.stressorId,
c.method, c.category, c.indicator, c.CF,
u.impact_score_unit
FROM stressors s
LEFT JOIN char c
ON c.name = s.name AND c.compartment=s.comp
AND c.subcompartment=s.subcomp
AND s.unit=c.exchange_unit
LEFT JOIN units u
ON u.method=c.method AND u.category=c.category
AND u.indicator=c.indicator"""
C_long = pd.read_sql(sql_cmd, self.conn)
# Generate IMP labels
C_long['impact_label'] = (C_long.method + sep
+ C_long.category + sep
+ C_long.indicator + sep
+ C_long.impact_score_unit)
self.IMP = C_long[['impact_label',
'method',
'category',
'indicator',
'impact_score_unit']].drop_duplicates()
self.IMP.set_index('impact_label', inplace=True)
# Pivot and reindex to generate characterisation matrix
self.C = pd.pivot_table(C_long,
values='CF',
columns='stressorId',
index='impact_label')
self.C = self.C.reindex(self.IMP.index).reindex_axis(self.STR.index, 1)
self.log.info("Characterisation matching done. C matrix created")
def prepare_matching_load_parameters(self):
""" Load predefined values and parameters for characterisation matching
"""
# Read in parameter tables for CAS conflicts and known synonyms
def read_parameters(filename):
#resource = pkg_resources.resource_filename(__name__, filename)
resource = pkgutil.get_data(__name__, filename)
tmp = pd.read_csv(io.BytesIO(resource), sep='|', comment='#')
print(tmp)
return tmp.where(pd.notnull(tmp), None)
self._cas_conflicts = read_parameters('parameters/cas_conflicts.csv')
self._synonyms = read_parameters('parameters/synonyms.csv')
self._custom_factors = read_parameters('parameters/custom_factors.csv')
# MORE HARDCODED PARAMETERS
# Subcompartment matching
self.obs2char_subcomp = pd.DataFrame(
columns=["comp", "obs_sc", "char_sc"],
data=[["soil", "agricultural", "agricultural"],
["soil", "forestry", "forestry"],
["air", "high population density", "high population density"],
["soil", "industrial", "industrial"],
["air", "low population density", "low population density"],
["water", "ocean", "ocean"],
["water", "river", "river"],
["water", "river, long-term", "river"],
["air", "lower stratosphere + upper troposphere",
"low population density"],
["air", "low population density, long-term",
"low population density"]
])
# Default subcompartment when no subcomp match and no "unspecified"
# defined
self.fallback_sc = pd.DataFrame(
columns=["comp", "fallbacksubcomp"],
data=[[ 'water','river'],
[ 'soil', 'industrial'],
[ 'air', 'low population density']
])
self._header_harmonizing_dict = {
'subcompartment':'subcomp',
'Subcompartment':'subcomp',
'Compartment':'comp',
'Compartments':'comp',
'Substance name (ReCiPe)':'charName',
'Substance name (SimaPro)':'simaproName',
'ecoinvent_name':'inventoryName',
'recipe_name':'charName',
'simapro_name':'simaproName',
'CAS number': 'cas',
'casNumber': 'cas',
'Unit':'unit' }
# POTENTIAL OTHER ISSUES
## Names that don't fit with their cas numbers
#['2-butenal, (2e)-', '123-73-9', '2-butenal',
# 'cas of (more common) E configuration; cas of mix is'
# ' rather 4170-30-3'],
#['3-(1-methylbutyl)phenyl methylcarbamate', '2282-34-0',
# 'bufencarb', 'resolve name-cas collision in ReCiPe: CAS'
# ' points to specific chemical, not bufencarb (008065-36-9),'
# ' which is a mixture of this substance and phenol,'
# ' 3-(1-ethylpropyl)-, 1-(n-methylcarbamate)'],
#['chlordane (technical)', '12789-03-6', None,
# 'pure chlordane has cas 000057-74-9, and is also defined'
# ' for cis and trans. This one here seems to be more of a'
# ' mixture or low grade, no formula in scifinder'],
def initialize_database(self):
""" Define tables of SQlite database for matching stressors to
characterisation factors
"""
c = self.conn.cursor()
c.execute('PRAGMA foreign_keys = ON;')
self.conn.commit()
here = path.abspath(path.dirname(__file__))
with open(path.join(here,'initialize_database.sql'),'r') as f:
c.executescript(f.read())
self.conn.commit()
self.log.warning("obs2char_subcomps constraints temporarily relaxed because not full recipe parsed")
def clean_label(self, table, name_cols = ('name', 'name2')):
""" Harmonize notation and correct for mistakes in label sqlite table
"""
c = self.conn.cursor()
table= scrub(table)
# Harmonize label units
c.executescript( """
UPDATE {t} set unit=trim(unit);
update {t} set unit='m3' where unit='Nm3';
update {t} set unit='m2a' where unit='m2*year';
update {t} set unit='m3a' where unit='m3*year';
""".format(t=table))
# TRIM, AND HARMONIZE COMP, SUBCOMP, AND OTHER NAMES
c.executescript( """
UPDATE {t}
SET comp=trim((lower(comp))),
subcomp=trim((lower(subcomp))),
name=trim(name),
name2=trim(name2),
cas=trim(cas);
update {t} set subcomp='unspecified'
where subcomp is null or subcomp='(unspecified)' or subcomp='';
update {t} set subcomp='low population density'
where subcomp='low. pop.';
update {t} set subcomp='high population density'
where subcomp='high. pop.';
update {t} set comp='resource' where comp='raw';
update {t} set comp='resource' where comp='natural resource';
""".format(t=table))
try:
c.executescript("""
update {t} set impactId=replace(impactId,')','');
update {t} set impactId=replace(impactid,'(','_');
""".format(t=table))
except sqlite3.OperationalError:
# Not every label has an impactId column...
pass
# NULLIFY SOME COLUMNS IF ARGUMENTS OF LENGTH ZERO
for col in ('cas',) + name_cols:
col = scrub(col)
c.execute("""
update {t} set {c}=null
where length({c})=0;""".format(t=table, c=col))
c.execute(""" update {t}
set {c} = replace({c}, ', biogenic', ', non-fossil')
where {c} like '%, biogenic%'
""".format(t=table, c=col))
# Clean up names
c.executescript("""
update {t}
set name=replace(name,', in ground',''),
name2=replace(name2, ', in ground','')
where (name like '% in ground'
or name2 like '% in ground');
update {t}
set name=replace(name,', unspecified',''),
name2=replace(name2, ', unspecified','')
where ( name like '%, unspecified'
OR name2 like '%, unspecified');
update {t}
set name=replace(name,'/m3',''),
name2=replace(name2,'/m3','')
where name like '%/m3';
""".format(t=table))
# DEFINE TAGS BASED ON NAMES
for tag in ('total', 'organic bound', 'fossil', 'non-fossil', 'as N'):
for name in name_cols:
c.execute(""" update {t} set tag='{ta}'
where ({n} like '%, {ta}');
""".format(t=table, ta=tag, n=scrub(name)))
self.conn.commit()
# Define more tags
c.execute("""
update {t} set tag='fossil'
where name like '% from soil or biomass stock'
or name2 like '% % from soil or biomass stock';
""".format(t=table))
c.execute("""
update {t} set tag='mix'
where name like '% compounds'
or name2 like '% compounds';
""".format(t=table))
c.execute("""
update {t} set tag='alpha radiation'
where (name like '%alpha%' or name2 like '%alpha%')
and unit='kbq';
""".format(t=table))
# Different types of "water", treat by name, not cas:
c.execute("""update {t} set cas=NULL
where name like '%water%'""".format(t=table))
# REPLACE FAULTY CAS NUMBERS CLEAN UP
for i, row in self._cas_conflicts.iterrows():
#if table == 'raw_char':
org_cas = copy.deepcopy(row.bad_cas)
aName = copy.deepcopy(row.aName)
if row.aName is not None and row.bad_cas is not None:
c.execute(""" update {t} set cas=?
where (name like ? or name2 like ?)
and cas=?""".format(t=table),
(row.cas, row.aName, row.aName, row.bad_cas))
elif row.aName is None:
c.execute("""select distinct name from {t}
where cas=?;""".format(t=table), (row.bad_cas,))
try:
aName = c.fetchone()[0]
except TypeError:
aName = '[]'
c.execute(""" update {t} set cas=?
where cas=?
""".format(t=table), (row.cas, row.bad_cas))
else: # aName, but no bad_cas specified
c.execute(""" select distinct cas from {t}
where (name like ? or name2 like ?)
""".format(t=table),(row.aName, row.aName))
try:
org_cas = c.fetchone()[0]
except TypeError:
org_cas = '[]'
c.execute(""" update {t} set cas=?
where (name like ? or name2 like ?)
""".format(t=table),
(row.cas, row.aName, row.aName))
if c.rowcount:
msg="Substituted CAS {} by {} for {} because {}"
self.log.info(msg.format( org_cas, row.cas, aName, row.comment))
self.conn.commit()
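# --- Added summary (not part of the original source) ---------------------
# clean_label() harmonizes raw labels roughly as follows (examples only):
#   unit:    'Nm3' -> 'm3', 'm2*year' -> 'm2a', 'm3*year' -> 'm3a'
#   comp:    'raw' / 'natural resource' -> 'resource'
#   subcomp: '(unspecified)' -> 'unspecified', 'low. pop.' -> 'low population density'
#   name:    suffixes such as ', in ground', ', unspecified' and '/m3' are stripped,
#            tags ('total', 'fossil', ...) are set from name suffixes,
#            and CAS numbers are nulled for the various "water" flows.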
def process_inventory_elementary_flows(self):
"""Input inventoried stressor flow table (STR) to database and clean up
DEPENDENCIES :
self.STR must be defined
"""
# clean up: remove leading zeros in front of CAS numbers
self.STR.cas = self.STR.cas.str.replace('^[0]*','')
# export to tmp SQL table
c = self.conn.cursor()
self.STR.to_sql('dirty_inventory',
self.conn,
index_label='id',
if_exists='replace')
c.execute( """
INSERT INTO raw_inventory(id, name, comp, subcomp, unit, cas)
SELECT DISTINCT id, name, comp, subcomp, unit, cas
FROM dirty_inventory;
""")
self.clean_label('raw_inventory')
self.conn.commit()
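# Added note (illustration only): the regex '^[0]*' strips leading zeros,
# e.g. a CAS stored as '007440-47-3' becomes '7440-47-3'. On recent pandas
# versions, str.replace() needs regex=True for this pattern to be treated
# as a regular expression rather than a literal string.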
def read_characterisation(self, characterisation_file):
"""Input characterisation factor table (STR) to database and clean up
"""
def xlsrange(wb, sheetname, rangename):
ws = wb.sheet_by_name(sheetname)
ix = xlwt.Utils.cellrange_to_rowcol_pair(rangename)
values = []
for i in range(ix[1],ix[3]+1):
values.append(tuple(ws.col_values(i, ix[0], ix[2]+1)))
return values
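# Added note: xlsrange() returns one tuple per spreadsheet column of the
# range; e.g. (hypothetical range) xlsrange(wb, 'GWP', 'H4:M6') yields one
# tuple for each of columns H..M, holding the cell values of rows 4..6.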
c = self.conn.cursor()
# check whether an extraction method has been written for reading the
# characterisation factor file
if 'ReCiPe111' in characterisation_file:
self.char_method='ReCiPe111'
picklename = re.sub('xlsx*$', 'pickle', characterisation_file)
else:
self.log.error("No method defined to read characterisation factors"
" from {}. Aborting.".format(characterisation_file))
#TODO: abort
# sheet reading parameters
hardcoded = [
{'name':'FEP' , 'rows':5, 'range':'B:M', 'impact_meta':'H4:M6'},
{'name':'MEP' , 'rows':5, 'range':'B:J', 'impact_meta':'H4:J6'},
{'name':'GWP' , 'rows':5, 'range':'B:P', 'impact_meta':'H4:P6'},
{'name':'ODP' , 'rows':5, 'range':'B:J', 'impact_meta':'H4:J6'},
{'name':'ODP' , 'rows':5, 'range':'B:G,N:P', 'impact_meta':'N4:P6'},
{'name':'AP' , 'rows':5, 'range':'B:M', 'impact_meta':'H4:M6'},
{'name':'POFP', 'rows':5, 'range':'B:M', 'impact_meta':'H4:M6'},
{'name':'PMFP', 'rows':5, 'range':'B:M', 'impact_meta':'H4:M6'},
{'name':'IRP' , 'rows':5, 'range':'B:M', 'impact_meta':'H4:M6'},
{'name':'LOP' , 'rows':5, 'range':'B:M', 'impact_meta':'H4:M6'},
{'name':'LOP' , 'rows':5, 'range':'B:G,Q:V', 'impact_meta':'Q4:V6'},
{'name':'LTP' , 'rows':5, 'range':'B:J', 'impact_meta':'H4:J6'},
{'name':'LTP' , 'rows':5, 'range':'B:G,N:P', 'impact_meta':'N4:P6'},
{'name':'WDP' , 'rows':5, 'range':'B:J', 'impact_meta':'H4:J6'},
{'name':'MDP' , 'rows':5, 'range':'B:M', 'impact_meta':'H4:M6'},
{'name':'FDP' , 'rows':5, 'range':'B:M', 'impact_meta':'H4:M6'},
{'name':'TP' , 'rows':5, 'range':'B:AE', 'impact_meta':'H4:AE6'}
]
headers = ['comp','subcomp','charName','simaproName','cas','unit']
if self.prefer_pickles and os.path.exists(picklename):
with open(picklename, 'rb') as f:
[imp, raw_char] = pickle.load(f)
else:
# Get all impact categories directly from excel file
print("reading for impacts")
self.log.info("CAREFUL! Make sure you shift headers to the right by"
" 1 column in FDP sheet of {}".format(characterisation_file))
wb = xlrd.open_workbook(characterisation_file)
imp =[]
for sheet in hardcoded:
print(imp)
imp = imp + xlsrange(wb, sheet['name'], sheet['impact_meta'])
imp = pd.DataFrame(columns=['perspective','unit', 'impactId'],
data=imp)
#imp.impactId = imp.impactId.str.replace(' ', '_')
imp.impactId = imp.impactId.str.replace('(', '_')
imp.impactId = imp.impactId.str.replace(')', '')
# GET ALL CHARACTERISATION FACTORS
raw_char = pd.DataFrame()
for i in range(len(hardcoded)):
sheet = hardcoded[i]
j = pd.io.excel.read_excel(characterisation_file,
sheet['name'],
skiprows=range(sheet['rows']),
parse_cols=sheet['range'])
# clean up a bit
j.rename(columns=self._header_harmonizing_dict, inplace=True)
try:
j.cas = j.cas.str.replace('^[0]*','')
except AttributeError:
pass
j.ix[:, headers] = j.ix[:, headers].fillna('')
j = j.set_index(headers).stack(dropna=True).reset_index(-1)
j.columns=['impactId','factorValue']
# concatenate
try:
raw_char = pd.concat([raw_char, j], axis=0, join='outer')
except NameError:
raw_char = j.copy()
except:
self.log.warning("Problem with concat")
# Pickle raw_char as read
self.log.info("Done with concatenating")
with open(picklename, 'wb') as f:
pickle.dump([imp, raw_char], f)
# Define numerical index
raw_char.reset_index(inplace=True)
# insert impacts to SQL
imp.to_sql('tmp', self.conn, if_exists='replace', index=False)
c.execute("""insert or ignore into impacts(
perspective, unit, impactId)
select perspective, unit, impactId
from tmp;""")
# insert raw_char to SQL
raw_char.to_sql('tmp', self.conn, if_exists='replace', index=False)
c.execute("""
insert into raw_char(
comp, subcomp, name, name2, cas, unit, impactId, factorValue)
select distinct comp, subcomp, charName, simaproName, cas,
unit, impactId, factorValue
from tmp;
""")
# RECIPE-SPECIFIC Pre-CLEAN UP
# add Chromium VI back, since it did NOT get read in the spreadsheet
# (Error512 in the spreadsheet)
c.executescript("""
create temporary table tmp_cr as select * from raw_char
where cas='7440-47-3';
update tmp_cr set id = NULL,
name='Chromium VI',
name2='Chromium VI',
cas='18540-29-9';
insert into raw_char select * from tmp_cr;
""")
self.log.info(
"Fixed the NaN values for chromium VI in ReCiPe spreadsheet,"
" assumed to have same toxicity impacts as chromium III.")
# Force copper in water to be ionic (cas gets changed as part of normal
# clean_label())
c.execute("""
update raw_char
set name='Copper, ion', name2='Copper, ion'
where comp='water' and name like 'copper'
""")
if c.rowcount:
self.log.info("For compatibility with ecoinvent, forced {} copper"
" emission to water to be ionic (Cu(2+)) instead"
" of neutral.".format(c.rowcount))
# MAJOR CLEANUP
self.clean_label('raw_char')
# COMPARTMENT SPECIFIC FIXES,
# i.e., ions out of water in char, or neutral in water in inventory
c.execute("""
UPDATE raw_char
SET cas='16065-83-1', name='Chromium III', name2='Chromium III'
WHERE cas='7440-47-3' AND comp='water' AND
(name LIKE 'chromium iii' OR name2 LIKE 'chromium iii')
""")
if c.rowcount:
self.log.info("Changed CAS changed one of the two names of {}"
" emissions of chromium III to water. Removes internal ambiguity"
" and resolves conflict with Ecoinvent.".format(c.rowcount))
# sort out neutral chromium
c.execute("""
update raw_char
set name='Chromium', name2='Chromium'
WHERE cas='7440-47-3' AND comp<>'water' AND
(name like 'chromium' or name2 like 'chromium')
""")
# add separate neutral Ni in groundwater, because it exists in ecoinvent
c.executescript("""
create temporary table tmp_ni as select * from raw_char
where cas='14701-22-5' and
subcomp='river';
update tmp_ni set id = NULL, name='Nickel', name2='Nickel',
cas='7440-02-0';
insert into raw_char select * from tmp_ni;
""")
self.conn.commit()
def populate_complementary_tables(self):
""" Populate substances, comp, subcomp, etc. from inventoried flows
"""
self._synonyms.to_sql('synonyms', self.conn, if_exists='replace')
# Populate comp and subcomp
c = self.conn.cursor()
c.executescript(
"""
INSERT INTO comp(compName)
SELECT DISTINCT comp FROM raw_inventory
WHERE comp NOT IN (SELECT compName FROM comp);
INSERT INTO subcomp (subcompName)
SELECT DISTINCT subcomp FROM raw_inventory
WHERE subcomp IS NOT NULL
and subcomp not in (select subcompname from subcomp);
""")
c.executescript(
# 1. integrate compartments and subcompartments
"""
INSERT INTO comp(compName)
SELECT DISTINCT r.comp from raw_char as r
WHERE r.comp NOT IN (SELECT compName FROM comp);
insert into subcomp(subcompName)
select distinct r.subcomp
from raw_char as r
where r.subcomp not in (select subcompName from subcomp);
"""
)
# populate obs2char_subcomp with object attribute: the matching between
# subcompartments in inventories and in characterisation method
self.obs2char_subcomp.to_sql('obs2char_subcomps',
self.conn,
if_exists='replace',
index=False)
self.fallback_sc.to_sql('fallback_sc',
self.conn,
if_exists='replace',
index=False)
self.conn.commit()
c.executescript(
"""
-- 2.1 Add Schemes
INSERT or ignore INTO schemes(NAME) SELECT '{}';
INSERT OR IGNORE INTO schemes(NAME) SELECT 'simapro';
INSERT OR IGNORE INTO schemes(NAME) SELECT '{}';
""".format(self.version_name, self.char_method))
self.conn.commit()
def _integrate_old_labels(self):
"""
Read in old labels in order to reuse the same Ids for the same flows,
for backward compatibility of any inventory using the new dataset
REQUIREMENTS
------------
self.STR_old must be defined with:
* THREE name columns called name, name2, name3
* cas, comp, subcomp, unit
* ardaid, i.e., the Id that we wish to re-use in the new dataset
RETURNS
-------
None
"""
# Fix column names and clean up CAS numbers in DataFrame
self.STR_old.rename(columns=self._header_harmonizing_dict, inplace=True)
self.STR_old.cas = self.STR_old.cas.str.replace('^[0]*','')
# save to tmp table in sqlitedb
c = self.conn.cursor()
self.STR_old.to_sql('tmp', self.conn, if_exists='replace', index=False)
# populate old_labels
c.executescript("""
INSERT INTO old_labels(ardaid,
name,
name2,
name3,
cas,
comp,
subcomp,
unit)
SELECT DISTINCT ardaid, name, name2, name3, cas, comp, subcomp, unit
FROM tmp;
""")
# clean up
self.clean_label('old_labels', ('name', 'name2', 'name3'))
# match substid by cas and tag
sql_command="""
update old_labels
set substid=(select distinct s.substid
from substances as s
where old_labels.cas=s.cas and
old_labels.tag like s.tag)
where old_labels.substId is null
and old_labels.cas is not null;
"""
self._updatenull_log(sql_command, 'old_labels', 'substid', log_msg=
"Matched {} with CAS from old_labels, out of {} unmatched rows." )
# match substid by name and tag matching
for name in ('name','name2', 'name3'):
sql_command="""
update old_labels
set substid=(select distinct n.substid
from names as n
where old_labels.{n} like n.name and
old_labels.tag like n.tag)
where old_labels.substId is null
;""".format(n=scrub(name))
self._updatenull_log(sql_command, 'old_labels', 'substid', log_msg=
"Matched {} with name and tag matching, out of {} unmatched rows from old_labels.")
# match substid by cas only
sql_command="""
update old_labels
set substid=(select distinct s.substid
from substances as s
where old_labels.cas=s.cas)
where old_labels.substId is null
and old_labels.cas is not null;
"""
self._updatenull_log(sql_command, 'old_labels', 'substid', log_msg=
"Matched {} from old_labels by CAS only, out of {} unmatched rows.")
self.conn.commit()
# match substid by name only
for name in ('name','name2', 'name3'):
sql_command = """
update old_labels
set substid=(select distinct n.substid
from names as n
where old_labels.{n} like n.name)
where substId is null
;""".format(n=scrub(name))
self._updatenull_log(sql_command, 'old_labels', 'substid', log_msg=
"Matched {} from old_labels by name only, out of {} unmatched rows.")
# document unmatched old_labels
unmatched = pd.read_sql("""
select * from old_labels
where substid is null;
""", self.conn)
if unmatched.shape[0]:
logfile = 'unmatched_oldLabel_subst.csv'
unmatched.to_csv(os.path.join(self.log_dir, logfile),
sep='|', encoding='utf-8')
msg = "{} old_labels entries not matched to substance; see {}"
self.log.warning(msg.format(unmatched.shape[0], logfile))
# save to file
self.conn.commit()
def characterize_flows(self, tables=('raw_char','raw_inventory')):
"""
Master function to characterize elementary flows
Args
----
* tables: tuple of the tables to be processed, typically a raw table of
* characterized flows (raw_char) and a table of inventoried elementary
* flows (raw_inventory)
IMPORTANT: because of the iterative treatment of synonyms and the use
of proxies to match as many flows as possible, it is best that the
tables tuple start with the table of characterized flows (raw_char)
"""
# Integrate substances based on CAS for each table
self._integrate_flows_withCAS(tables)
# Integrate substances by string matching and synonyms for each table
self._integrate_flows_withoutCAS(tables)
# Clean up, production of lists of inventory and characterised
# stressors, and compile table of characterisation factors
self._finalize_labels_and_factors(tables)
# Integrate old stressor list, notably to re-use old Ids
if self.STR_old is not None:
self._integrate_old_labels()
# Produce stressor label (STR), impact labels (IMP), and
# characterisation matrix (C)
self._characterisation_matching()
self.conn.commit()
def _update_labels_from_names(self, table):
""" Update Substance ID in labels based on name matching"""
c = self.conn.cursor()
self.conn.executescript(
"""
--- Match based on names
UPDATE OR ignore {t}
SET substid=(
SELECT n.substid
FROM names as n
WHERE ({t}.name like n.name or {t}.name2 like n.name)
AND {t}.tag IS n.tag
)
WHERE {t}.substid IS NULL
AND {t}.cas IS NULL;
""".format(t=scrub(table)));
# Match with known synonyms, in decreasing order of accuracy in
# approximation
for i in np.sort(self._synonyms.approximationLevel.unique()):
for j in [('aName', 'anotherName'),('anotherName', 'aName')]:
c.execute("""
UPDATE OR ignore {t}
SET substid=(
SELECT DISTINCT n.substid
FROM names as n, synonyms as s
WHERE ({t}.name like s.{c0}
OR {t}.name2 like s.{c0})
AND s.{c1} like n.name
AND {t}.tag IS n.tag
AND s.approximationLevel = ?)
WHERE {t}.substid IS NULL
AND {t}.cas IS NULL
""".format(t=scrub(table), c0=scrub(j[0]), c1=scrub(j[1])),
[str(i)])
self.conn.commit()
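# Added note: the nested loop above applies synonym pairs in both
# directions (aName <-> anotherName) and in increasing approximationLevel,
# i.e. (per the comment above) the most accurate synonyms are tried before
# the looser approximations.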
def _insert_names_from_labels(self, table):
"""Subfunction to handle names/synonyms in _integrate_flows_* methods
"""
c = self.conn.cursor()
c.executescript("""
--- Insert names
INSERT OR IGNORE INTO names (name, tag, substid)
SELECT DISTINCT name, tag, substId
FROM {t} where substid is not null
UNION
SELECT DISTINCT name2, tag, substId
FROM {t} where substid is not null;
---- INSERT OR IGNORE INTO names(name, tag, substid)
---- SELECT DISTINCT s.anotherName, n.tag, n.substid
---- FROM names n, synonyms s
---- WHERE s.aName LIKE n.name;
---- INSERT OR IGNORE INTO names(name, tag, substid)
---- SELECT DISTINCT s.aName, n.tag, n.substid
---- FROM names n, synonyms s
---- WHERE s.anotherName LIKE n.name;
""".format(t=scrub(table)))
def _integrate_flows_withCAS(self, tables=('raw_inventory', 'raw_char')):
""" Populate substances, comp, subcomp, etc. from inventoried flows
Can be seen as a subroutine of self.characterize_flows()
"""
# Populate comp and subcomp
c = self.conn.cursor()
for table in tables:
c.executescript(
# 2.2 A new substance for each new cas+tag
# this will automatically ignore any redundant cas-tag combination
"""
insert or ignore into substances(aName, cas, tag)
select distinct r.name, r.cas, r.tag FROM {t} AS r
WHERE r.cas is not null AND r.NAME IS NOT NULL
UNION
select distinct r.name2, r.cas, r.tag from {t} AS r
WHERE r.cas is not null AND r.name IS NULL
;
-- 2.4: backfill labels with substid based on CAS-tag
UPDATE OR ignore {t}
SET substid=(
SELECT s.substid
FROM substances as s
WHERE {t}.cas=s.cas
AND {t}.tag IS s.tag
)
WHERE {t}.substid IS NULL
;
""".format(t=scrub(table)))
self._insert_names_from_labels(table)
self.conn.commit()
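# Added note: after this step, every label row with a CAS number has its
# substid backfilled from the CAS+tag match; rows without a CAS are left
# NULL and are handled by _integrate_flows_withoutCAS() below.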
def _integrate_flows_withoutCAS(self, tables=('raw_inventory', 'raw_char')):
""" populate substances and names tables from flows without cas
Can be seen as a subroutine of self.characterize_flows()
"""
c = self.conn.cursor()
# new substances for each new name-tags in one dataset
# update labels with substid from substances
for table in tables:
# update substid in labels by matching with names already defined
# catch any synonyms
self._update_labels_from_names(table)
# Insert any new synonym
self._insert_names_from_labels(table)
# Define new substances for those that remain
c.executescript("""
-- 2.5: Create new substances for the remaining flows
INSERT OR ignore INTO substances(aName, cas, tag)
SELECT DISTINCT name, cas, tag
FROM {t} r WHERE r.substid IS NULL AND r.name IS NOT NULL
UNION
SELECT DISTINCT name2, cas, tag
FROM {t} r WHERE r.substid IS NULL AND r.name IS NULL
;
-- 2.6: backfill labels with substid based on name-tag
UPDATE {t}
SET substid=(
SELECT s.substid
FROM substances s
WHERE ({t}.name like s.aName OR {t}.name2 like s.aName)
AND {t}.tag IS s.tag
)
WHERE substid IS NULL
;
""".format(t=scrub(table))) # 2.6
# insert new name-substid pairs into names
self._insert_names_from_labels(table)
# update substid in labels by matching with names already defined
self._update_labels_from_names(table)
self.conn.commit()
def _finalize_labels_and_factors(self, tables=('raw_char', 'raw_inventory')):
""" SubstID matching qualitiy checks, produce labels, populate factors
Can be seen as a subroutine of self.characterize_flows()
Prep work:
* Checks for missed synonyms in substid matching
* checks for near misses because of plurals
* Link Names to Schemes (Recipe*, ecoinvent, etc.) in nameHasScheme
Main tasks:
* Produce labels (labels_inventory, labels_char)
* Populate the factors table with characterisation factors
Post processing:
* Customize characterisation factors based on custom_factors.csv
parameter
* Identify conflicts
"""
c = self.conn.cursor()
# Check for apparent synonyms that have different substance Ids
# and log warning
c.execute(
"""
select distinct r.name, n1.substid, r.name2, n2.substid
from raw_char r, names n1, names n2
where r.name=n1.name and r.name2=n2.name
and n1.substid <> n2.substid;
""")
missed_synonyms = c.fetchall()
if len(missed_synonyms):
self.log.warning("Probably missed on some synonym pairs")
print(missed_synonyms)
# Check for flows that have not been matched to substance ID and log
# warning
for table in tables:
c.execute(
"select * from {} where substid is null;".format(scrub(table)))
missed_flows = c.fetchall()
if len(missed_flows):
self.log.warning("There are missed flows in "+table)
print(missed_flows)
# Log any near matches that might have been missed because of some
# plural
c.execute(
"""
select * from Names as n1, Names as n2
where n1.name like n2.name||'s' and n1.substid <> n2.substid;
""")
missed_plurals = c.fetchall()
if len(missed_plurals):
self.log.warning("Maybe missed name matching because of plurals")
print(missed_plurals)
# Match Names with Schemes (Simapro, Recipe, Ecoinvent, etc.)
self.conn.executescript("""
--- match names with scheme of self.version_name
INSERT INTO nameHasScheme
SELECT DISTINCT n.nameId, s.schemeId from names n, schemes s
WHERE n.name in (SELECT DISTINCT name FROM raw_inventory)
and s.name='{}';
--- match names with scheme of self.char_method
insert into nameHasScheme
select distinct n.nameId, s.schemeId from names n, schemes s
where n.name in (select name from raw_char)
and s.name='{}';
--- match alternative name in characterisation method (name2) with
--- simapro scheme (hardcoded)
insert into nameHasScheme
select distinct n.nameId, s.schemeId
from names n, schemes s
where n.name in (select name2 from raw_char)
and s.name='simapro';
""".format(scrub(self.version_name), scrub(self.char_method)))
# For each table, prepare labels and, if applicable, populate
# factors table with characterisation factors
for i in range(len(tables)):
table = scrub(tables[i])
if 'inventory' in table:
t_out = 'labels_inventory'
else:
t_out = 'labels_char'
# Populate factors table
self.conn.commit()
c.execute(# only loose constraint on table, the better to
# identify uniqueness conflicts and log problems in a
# few lines (as soon as for-loop is over)
#
# TODO: fix the way methods is defined
"""
insert or ignore into factors(
substId, comp, subcomp, unit, impactId, factorValue, method)
select distinct
substId, comp, subcomp, unit, impactId, factorValue, '{c}'
from {t};
""".format(t=table, c=scrub(self.char_method))
)
# Prepare labels
self.conn.executescript("""
INSERT INTO {to}(
id, substId, name, name2, tag, comp, subcomp, cas, unit)
SELECT DISTINCT
id, substId, name, name2, tag, comp, subcomp, cas, unit
FROM {t};
""".format(t=table, to=t_out))
# Customize characterizations based on custom_factors.csv
for i,row in self._custom_factors.iterrows():
c.execute("""
UPDATE OR IGNORE factors
SET factorValue=?
WHERE impactId = ?
AND substid=(SELECT DISTINCT substid
FROM names WHERE name LIKE ?)
AND comp=? AND subcomp=? AND unit=?;""", (row.factorValue,
row.impactID, row.aName, row.comp, row.subcomp, row.unit))
if c.rowcount:
msg="Custimized {} factor to {} for {} ({}) in {} {}"
self.log.info(msg.format(row.impactID, row.factorValue,
row.aName, row.unit, row.comp, row.subcomp))
# Identify conflicting characterisation factors
sql_command = """ select distinct
f1.substid,
s.aName,
f1.comp,
f1.subcomp,
f1.unit,
f1.impactId,
f1.method,
f1.factorValue,
f2.factorValue
from factors f1, factors f2, substances s
where
f1.substId=f2.substId and f1.substId=s.substId
and f1.comp=f2.comp
and f1.subcomp = f2.subcomp
and f1.unit = f2.unit
and f1.impactId = f2.impactId
and f1.method = f2.method
and f1.factorValue <> f2.factorValue; """
factor_conflicts = pd.read_sql(sql_command, self.conn)
for i, row in factor_conflicts.iterrows():
self.log.warning("FAIL! Different characterization factor "
"values for same flow-impact pair? Conflict:")
self.log.warning(row.values)
self.conn.commit()
def _characterisation_matching(self):
""" Produce stressor (STR) and impact (IMP) labels, and
characterisation matrix (C).
"""
c = self.conn.cursor()
# Get all inventory flows straight into labels_out
c.execute(
"""
insert into labels_out(
dsid, substId, comp, subcomp,name, name2, cas, tag, unit)
select distinct
id, substId, comp, subcomp,name, name2, cas, tag, unit
from labels_inventory;
""")
c.execute("""
insert into labels_out(
substid, comp, subcomp, name, name2, cas, tag, unit)
select distinct
lc.substid, lc.comp, lc.subcomp, lc.name, lc.name2, lc.cas,
lc.tag, lc.unit
from labels_char lc
where not exists(select 1 from labels_out lo
where lo.substid=lc.substid
and lo.comp = lc.comp
and lo.subcomp = lc.subcomp
and lo.unit = lc.unit)
""") # TODO: could improve labels_out, minimum data, then left join
# for cas, ardaid, name2, etc.
sql_command = """
update or ignore labels_out
set ardaid=(select ardaid from old_labels ol
where labels_out.substId=ol.substId
and labels_out.comp=ol.comp
and labels_out.subcomp = ol.subcomp
and labels_out.unit = ol.unit)
where labels_out.ardaid is null;
"""
self._updatenull_log(sql_command, 'labels_out', 'ardaid', log_msg=
" Matched {} with ArdaID from old labels, out of {} unmatched rows."
)
# MATCH LABEL_OUT ROW WITH CHARACTERISATION FACTORS
# first match based on perfect comp correspondence
c.execute(
"""
INSERT INTO obs2char(
flowId, impactId, factorId, factorValue, scheme)
SELECT DISTINCT
lo.id, f.impactId, f.factorId, f.factorValue, f.method
FROM
labels_out lo, factors f
WHERE
lo.substId = f.substId AND
lo.comp = f.comp AND
lo.subcomp = f.subcomp AND
f.method = '{}' AND
lo.unit = f.unit;
""".format(scrub(self.char_method)))
self.log.info("Matched {} flows and factors, with exact subcomp"
" matching".format(c.rowcount))
# second insert for approximate subcomp
c.execute(
"""
INSERT or ignore INTO obs2char(
flowId, impactId, factorId, factorValue, scheme)
SELECT DISTINCT
lo.id, f.impactId, f.factorId, f.factorValue, f.method
FROM
labels_out lo, factors f, obs2char_subcomps ocs
WHERE
lo.substId = f.substId AND
lo.comp = f.comp AND
lo.subcomp = ocs.obs_sc AND ocs.char_sc = f.subcomp AND
f.method = '{}' AND
lo.unit = f.unit;
""".format(scrub(self.char_method)))
self.log.info("Matched {} flows and factors, with approximate subcomp"
" matching".format(c.rowcount))
# third insert for subcomp 'unspecified'
c.execute(
"""
INSERT or ignore INTO obs2char(
flowId, impactId, factorId, factorValue, scheme)
SELECT DISTINCT
lo.id, f.impactId, f.factorId, f.factorValue, f.method
FROM
labels_out lo, factors f, obs2char_subcomps ocs
WHERE
lo.substId = f.substId AND
lo.comp = f.comp AND
f.subcomp = 'unspecified' AND
f.method = '{}' AND
lo.unit = f.unit;
""".format(scrub(self.char_method)))
self.log.info("Matched {} flows and factors, with 'unspecified' subcomp"
" matching".format(c.rowcount))
# fourth insert for subcomp fallback
c.execute(
"""
INSERT or ignore INTO obs2char(
flowId, dsid, impactId, factorId, factorValue, scheme)
SELECT DISTINCT
lo.id, lo.dsid, f.impactId, f.factorId, f.factorValue, f.method
FROM
labels_out lo, factors f, fallback_sc fsc
WHERE
lo.substId = f.substId AND
lo.comp = f.comp AND lo.comp=fsc.comp AND
f.subcomp=fsc.fallbacksubcomp AND
f.method = '{}' AND
lo.unit = f.unit;
""".format(scrub(self.char_method)))
self.log.info("Matched {} flows and factors, by falling back to a "
"default subcompartment".format(c.rowcount))
sql_command="""SELECT DISTINCT *
FROM labels_out lo
WHERE lo.id NOT IN (SELECT DISTINCT flowId
FROM obs2char)
order by name;"""
unchar_flow=pd.read_sql(sql_command, self.conn)
filename = os.path.join(self.log_dir,'uncharacterized_flows.csv')
unchar_flow.to_csv(filename, sep='|')
self.log.warning("This leaves {} flows uncharacterized, see {}".format(
unchar_flow.shape[0], filename))
sql_command="""
select distinct s.substId, s.aName, s.cas, s.tag
from substances s
where s.substId not in (
select distinct lo.substid
from labels_out lo
where lo.id in (select distinct flowId
from obs2char)
)
order by s.aName;
"""
unchar_subst=pd.read_sql(sql_command, self.conn)
filename=os.path.join(self.log_dir,'uncharacterized_subst.csv')
unchar_subst.to_csv(filename , sep='|')
self.log.warning("These uncharacterized flows include {} "
"substances, see {}".format(unchar_subst.shape[0],
filename))
# Get unmatched substances, excluding land-use entries
c.execute("""
select count(*) from (
select distinct s.substId, s.aName, s.cas, s.tag
from substances s
where s.substId not in (
select distinct lo.substid
from labels_out lo
where lo.id in (select distinct flowId
from obs2char)
)
and s.aName not like 'transformation%'
and s.aName not like 'occupation%'
);
""")
self.log.warning("Of these uncharacterized 'substances', {} are "
" not land occupation or transformation.".format(
c.fetchone()[0]))
def generate_characterized_extensions(self):
# get labels from database
self.STR = pd.read_sql("select * from labels_out",
self.conn,
index_col='id')
self.IMP = pd.read_sql("""select * from impacts
order by perspective, unit, impactId""",
self.conn)
self.IMP.set_index('impactId',drop=False,inplace=True)
self.IMP.index.name='index'
# get table and pivot
obs2char = pd.read_sql("select * from obs2char", self.conn)
self.C = pd.pivot_table(obs2char,
values='factorValue',
columns='flowId',
index='impactId').reindex_axis(self.STR.index, 1)
self.C = self.C.reindex_axis(self.IMP.index, 0).fillna(0)
# Reorganize elementary flows to follow STR order
Forg = self.F.fillna(0)
self.F = self.F.reindex_axis(self.STR.ix[:, 'dsid'].values, 0).fillna(0)
self.F.index = self.STR.index.copy()
# safety assertions
assert(np.allclose(self.F.values.sum(), Forg.values.sum()))
assert((self.F.values > 0).sum() == (Forg.values > 0).sum())
def make_compatible_with_arda(self, ardaidmatching_file):
""" For backward compatibility, try to reuse ArdaIds from previous
version
Args
----
* ardaidmatching_file: CSV file matching ArdaID with ecoinvent2.2 DSID
and with version3 UUIDs
Dependencies:
-------------
* self.PRO_old, defined by extract_old_labels()
* self.IMP_old, defined by extract_old_labels()
* self.STR_old, defined by extract_old_labels()
For Processes, ArdaIDs are matched using the matching file. For
elementary flows, the matching is already done from
_integrate_old_labels(). For impact categories, matching based on
acronyms. For processes, elementary flows and impacts without an Id,
one is serially defined.
"""
def complement_ardaid(label, old_label, column='ardaid',step=10,
name='an_unnamed_label'):
""" Generate ArdaId for processes, stressors or impacts needing it
"""
# Start above the maximum historical ID (+ step, for buffer)
anId = old_label.ardaid.max() + step
# Loop through all rows, fix NaN or Null ArdaIds
for i, row in label.iterrows():
if not row['ardaid'] > 0:
anId +=1
label.ix[i,'ardaid'] = anId
# Make sure all Ids are unique to start with
if len(label.ix[:, column].unique()) != len(label.ix[:, column]):
self.log.error('There are duplicate Ids in {}'.format(name))
return label
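# Added example (hypothetical numbers): if the largest historical ArdaId
# is 1500 and step=10, rows missing an Id are assigned 1511, 1512, ...,
# leaving a small buffer between old and newly generated Ids.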
def finalize_indexes(label, old_index, duplicate_cols):
# return to original indexes
label = label.set_index(old_index.name)
# make sure that indexes have not changed
assert(set(old_index) == set(label.index))
# Go back to original order
label = label.reindex(old_index)
# Remove columns leftover during the merge
label = label.drop(duplicate_cols, 1)
return label
def organize_labels(label, fullnamecols, firstcols, lastcols):
""" Ensure the columns in the right place for Arda to understand
* Fullname must be first column
* ArdaId must be second
* Unit must be last
"""
for i in fullnamecols:
try:
full = full + '/' + label[i]
except NameError:
full = label[i]
full.name='fullname'
l = pd.concat([full,
label[firstcols],
label.drop(firstcols + lastcols, 1),
label[lastcols]],
axis=1)
return l
# As much as possible, assign PRO with old ArdaID, for backward
# compatibility. For PRO, do it with official UUID-DSID matching
a = | pd.read_csv(ardaidmatching_file) | pandas.read_csv |
from __future__ import division
import pandas as pd
import logging
import numpy as np
from numpy.random import RandomState
from trumania.core.operations import AddColumns
from trumania.core.random_generators import DependentGenerator
from trumania.core.util_functions import latest_date_before
class Clock(object):
"""
A Clock is the central object managing the evolution of time of the whole circus.
It's generating timestamps on demand, and provides information for TimeProfiler objects.
"""
def __init__(self, start, step_duration, seed):
"""Create a Clock object.
:type start: pd.Timestamp
:param start: instant of start of the generation
:type step_duration: pd.Timedelta
:param step_duration: duration of a clock step
:type seed: int
:param seed: seed for timestamp generator (if steps are more than 1 sec)
:return: a new Clock object, initialised
"""
self.current_date = start
self.step_duration = step_duration
self.__state = RandomState(seed)
self.ops = self.ClockOps(self)
self.__increment_listeners = []
def register_increment_listener(self, listener):
"""Add an object to be incremented at each step (such as a TimeProfiler)
"""
self.__increment_listeners.append(listener)
def increment(self):
"""Increments the clock by 1 step
:rtype: NoneType
:return: None
"""
self.current_date += self.step_duration
for listener in self.__increment_listeners:
listener.increment()
def get_timestamp(self, size=1, random=True, log_format=None):
"""
Returns timestamps formatted as string
:type size: int
:param size: number of timestamps to generate, default 1
:type random: boolean
:param random: if True, the timestamps are randomly generated in [
self.current_date, self.current_date+self.step_duration]
:type log_format: string
:param log_format: string format of the generated timestamps
:rtype: Pandas Series
:return: random timestamps in the form of strings
"""
if log_format is None:
log_format = "%Y-%m-%d %H:%M:%S"
def make_ts(delta_secs):
date = self.current_date + pd.Timedelta(seconds=delta_secs)
return date.strftime(log_format)
if random:
step_secs = int(self.step_duration.total_seconds())
return pd.Series(self.__state.choice(step_secs, size)).apply(make_ts)
else:
return pd.Series([self.current_date.strftime(log_format)] * size)
def n_iterations(self, duration):
"""
:type duration: pd.Timedelta
:return: the smallest number of iterations of this clock s.t. the
corresponding duration is >= duration
"""
step_secs = self.step_duration.total_seconds()
return int(np.ceil(duration.total_seconds() / step_secs))
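# Added example (assuming a 60 s step_duration):
# clock.n_iterations(pd.Timedelta("1 h")) == ceil(3600 / 60) == 60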
class ClockOps(object):
def __init__(self, clock):
self.clock = clock
class Timestamp(AddColumns):
def __init__(self, clock, named_as, random, log_format):
AddColumns.__init__(self)
self.clock = clock
self.named_as = named_as
self.random = random
self.log_format = log_format
def build_output(self, story_data):
values = self.clock.get_timestamp(
size=story_data.shape[0], random=self.random,
log_format=self.log_format).values
df = pd.DataFrame({self.named_as: values},
index=story_data.index)
return df
def timestamp(self, named_as, random=True, log_format=None):
"""
Generates a random timestamp within the current time slice
"""
return self.Timestamp(self.clock, named_as, random, log_format)
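# --- Illustrative usage sketch (added; parameter values are made up) -----
# clock = Clock(start=pd.Timestamp("2020-01-01"),
#               step_duration=pd.Timedelta("60s"),
#               seed=1234)
# clock.get_timestamp(size=3)        # 3 random timestamps within the current step
# clock.get_timestamp(random=False)  # the current date itself, formatted
# clock.increment()                  # advance current_date by one step
# op = clock.ops.timestamp(named_as="TIME")  # AddColumns operation adding a "TIME" column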
class CyclicTimerGenerator(DependentGenerator):
"""A TimeProfiler contains an activity profile over a defined time range.
It's mostly a super class; normally only its child classes should be used.
The goal of a TimeProfiler is to keep track of the expected level of user activity over a cyclic time range.
It stores a vector of activity probabilities per time step, as well as a cumulative sum of the
probabilities starting with the current time step.
This allows random waiting times until the next event to be produced quickly for each user.
"""
def __init__(self, clock, seed, config):
"""
This should not be used, only child classes
:type clock: Clock
:param clock: the master clock driving this simulator
:type seed: int
:param seed: seed for random number generator, default None
:return: A new TimeProfiler is created
"""
DependentGenerator.__init__(self)
self._state = RandomState(seed)
self.config = config
self.clock = clock
# "macro" time shift: we shift the whole profile n times in the future
# or the past until it overlaps with the current clock date
init_date = latest_date_before(
starting_date=config.start_date,
upper_bound=clock.current_date,
time_step=pd.Timedelta(config.profile_time_steps) * len(
config.profile))
# Un-scaled weight profile. We artificially add a nan to force the
# up-scaling to also expand the last element
profile_idx = pd.date_range(start=init_date,
freq=config.profile_time_steps,
periods=len(config.profile) + 1)
profile_ser = pd.Series(data=config.profile + [np.nan],
index=profile_idx)
# scaled weight profile, s.t. one clock step == one profile value
profile_ser = profile_ser.resample(rule=clock.step_duration).pad()[:-1]
self.n_time_bin = profile_ser.shape[0]
profile_cdf = (profile_ser / profile_ser.sum()).cumsum()
self.profile = pd.DataFrame({"cdf": profile_cdf,
# for debugging
"timeframe": np.arange(len(profile_cdf))})
# "micro" time shift,: we step forward along the profile until it is
# align with the current date
while self.profile.index[0] < clock.current_date:
self.increment()
# makes sure we'll get notified when the clock goes forward
clock.register_increment_listener(self)
def increment(self):
"""
Increment the time generator by 1 step.
This has as effect to move the cdf of one step to the left, decrease
all values by the value of the original first entry, and placing the
previous first entry at the end of the cdf, with value 1.
"""
self.profile["cdf"] -= self.profile["cdf"].iloc[0]
self.profile = pd.concat([self.profile.iloc[1:], self.profile.iloc[:1]])
self.profile.loc[self.profile.index[-1], "cdf"] = 1
def generate(self, observations):
"""Generate random waiting times, based on some observed activity
levels. The higher the level of activity, the shorter the waiting
times will be
:type observations: Pandas Series
:param observations: contains an array of floats
:return: Pandas Series
"""
activities = observations
# activities of at most 2 stories per cycle length (rare events)
low_activities = activities.where((activities <= 2) & (activities > 0)).dropna()
if low_activities.shape[0] > 0:
draw = self._state.uniform(size=low_activities.shape[0])
# A uniform draw on [0, 2/activity] has an expected value of 1/activity,
# i.e. the average period between stories.
# => n_cycles is the number of full timer cycles from now until the
# next story. It's typically not an integer and can be > 1
# since we have on average less than 1 activity per cycle of this
# timer.
n_cycles = 2 * draw / low_activities.values
timer_slots = n_cycles % 1
n_cycles_int = n_cycles - timer_slots
timers = self.profile["cdf"].searchsorted(timer_slots) + \
self.n_time_bin * n_cycles_int
low_activity_timer = pd.Series(timers, index=low_activities.index)
else:
low_activity_timer = pd.Series()
high_activities = activities.where(activities > 2).dropna()
if high_activities.shape[0] > 0:
# The mean of a beta(1, activity-1) draw is
# 1/(1+activity-1) == 1/activity, i.e. the average period between stories.
# This stops working for activities < 1, or even close to one
# => we use the uniform mechanism above for activities <= 2 and
# rely on betas here for expected frequencies of 2 per cycle or
# higher
timer_slots = high_activities.apply(
lambda activity: self._state.beta(1, activity - 1))
timers = self.profile["cdf"].searchsorted(timer_slots, side="left")
high_activity_timer = pd.Series(timers, index=high_activities.index)
else:
high_activity_timer = pd.Series()
all_timers = pd.concat([low_activity_timer, high_activity_timer])
# Not sure about that one: there seems to be a bias somewhere that
# systematically generates timers that are too large. Maybe it's a
# rounding effect of searchsorted() or so. Or a bug elsewhere?
all_timers = all_timers.apply(lambda d: max(0, d - 1))
# makes sure all_timers is in the same order and with the same index
# as input observations, even in case of duplicate index values
all_timers = all_timers.reindex_like(observations)
return all_timers
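# Added summary: activities <= 2 (rare events) draw a uniform number of
# full timer cycles until the next story, whereas activities > 2 draw a
# within-cycle position from beta(1, activity-1); both give an expected
# spacing of about 1/activity timer cycles between stories.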
def activity(self, n, per):
"""
:param n: number of stories
:param per: time period for that number of stories
:type per: pd.Timedelta
:return: the activity level corresponding to the specified number of n
executions per time period
"""
scale = self.config.duration().total_seconds() / per.total_seconds()
activity = n * scale
requested_period = pd.Timedelta(seconds=per.total_seconds() / n)
if requested_period < self.clock.step_duration:
logging.warning(
"Warning: Creating activity level for {} stories per "
"{} => activity is {} but period is {}, which is "
"shorter than the clock period ({}). This clock "
"cannot keep up with such rate and less events will be"
" produced".format(n, per, activity, requested_period,
self.clock.step_duration)
)
return activity
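# Added example (assuming the profile spans one week): activity(n=2,
# per=pd.Timedelta("1 day")) returns 2 * 7 == 14, i.e. 14 expected
# stories per full profile cycle.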
class CyclicTimerProfile(object):
"""
Static parameters of the Timer profile. Separated from the timer gen
itself to facilitate persistence.
:type profile: python array
:param profile: Weight of each period
:type profile_time_steps: string
:param profile_time_steps: duration of the time-steps in the profile
(e.g. "15min")
:type start_date: pd.Timestamp
:param start_date: date of the origin of the specified profile =>
this is used to align with the values of the clock
"""
def __init__(self, profile, profile_time_steps, start_date):
self.start_date = start_date
self.profile = profile
self.profile_time_steps = profile_time_steps
def save_to(self, file_path):
logging.info("saving timer generator to {}".format(file_path))
saved_df = pd.DataFrame({("value", "profile"): self.profile},
dtype=str).stack()
saved_df.index = saved_df.index.reorder_levels([1, 0])
saved_df.loc[("start_date", 0)] = self.start_date
saved_df.loc[("profile_time_steps", 0)] = self.profile_time_steps
saved_df.to_csv(file_path)
@staticmethod
def load_from(file_path):
saved_df = pd.read_csv(file_path, index_col=[0, 1])
profile = saved_df.loc[("profile", slice(None))]\
.unstack()\
.astype(float)\
.tolist()
profile_time_steps = saved_df.loc["profile_time_steps"].values[0][0]
start_date = pd.Timestamp(saved_df.loc["start_date"].values[0][0])
return CyclicTimerProfile(profile, profile_time_steps, start_date)
def duration(self):
"""
:return: the total duration corresponding to this time profile
"""
return len(self.profile) * | pd.Timedelta(self.profile_time_steps) | pandas.Timedelta |
import pandas as pd
import time
# Grab the DLA HAR dataset from:
# http://groupware.les.inf.puc-rio.br/har
# http://groupware.les.inf.puc-rio.br/static/har/dataset-har-PUC-Rio-ugulino.zip
#
# TODO: Load up the dataset into dataframe 'X'
#
# .. your code here ..
#
# TODO: Clean up any column with commas in it
# so that they're properly represented as decimals instead
#
# .. your code here ..
X = | pd.read_csv('E:/Github/DAT210x-Lab/Module6/Datasets/PUC.csv', sep=';', index_col=0, decimal=',') | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 17 17:07:51 2019
@author: jeichman
"""
import pandas as pd
import os
import fnmatch
dir0 = 'C:/Users/jeichman/Documents/gamsdir/projdir/RODeO/Projects/VTA_bus_project2/Output/' # Location of files to load
c0 = 0
for files2load in os.listdir(dir0):
if fnmatch.fnmatch(files2load, 'Storage_dispatch_resultsDevices*'):
if c0==0:
files2load2 = [files2load]
c0=c0+1
else:
files2load2.append(files2load)
c0=c0+1
c0 = 0
c2 = [0]
for files2load in files2load2:
# Load files and melt into shape
results_devices = | pd.read_csv(dir0 + files2load,sep=',') | pandas.read_csv |
#!/usr/bin/env python
# encoding: utf-8
from collections import defaultdict
from css_html_js_minify import html_minify
from functools import lru_cache
from jinja2 import Environment, FileSystemLoader
from operator import itemgetter
from pathlib import Path
from pyvis.network import Network
import codecs
import json
import networkx as nx
import numpy as np
import pandas as pd
import scipy.stats as stats
import sys
import time
import traceback
class RCNeighbors:
def __init__ (self):
self.prov = []
self.data = []
self.publ = []
self.jour = []
self.auth = []
self.topi = []
def serialize (self, t0, cache_token):
"""
serialize this subgraph/neighborhood as JSON
"""
view = {
"prov": sorted(self.prov, key=lambda x: x[1], reverse=True),
"data": sorted(self.data, key=lambda x: x[1], reverse=True),
"publ": sorted(self.publ, key=lambda x: x[1], reverse=True),
"jour": sorted(self.jour, key=lambda x: x[1], reverse=True),
"auth": sorted(self.auth, key=lambda x: x[1], reverse=True),
"topi": sorted(self.topi, key=lambda x: x[1], reverse=True),
"toke": cache_token,
"time": "{:.2f}".format((time.time() - t0) * 1000.0)
}
return json.dumps(view, indent=4, sort_keys=True, ensure_ascii=False)
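# Illustrative shape of the serialized payload (added; values made up):
# {"prov": [[12, 0.87], ...], "data": [...], "publ": [...], "jour": [...],
#  "auth": [...], "topi": [...], "toke": "<cache token>", "time": "3.14"}
# where each list is sorted by its second field in descending order.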
class RCNetworkNode:
def __init__ (self, view=None, elem=None):
self.view = view
self.elem = elem
class RCNetwork:
MAX_TITLE_LEN = 100
Z_975 = stats.norm.ppf(q=0.975)
def __init__ (self):
self.id_list = []
self.labels = {}
self.nxg = None
self.scale = {}
self.prov = {}
self.data = {}
self.publ = {}
self.jour = {}
self.auth = {}
self.topi = {}
def get_id (self, id):
"""
lookup the numeric ID for an element
"""
return int(self.id_list.index(id))
def parse_metadata (self, elem):
"""
parse the required metadata items from one element in the graph
"""
kind = elem["@type"]
title = elem["dct:title"]["@value"]
id = elem["@id"].split("#")[1]
self.id_list.append(id)
self.labels[self.get_id(id)] = title
return id, kind, title, elem
def parse_corpus (self, path):
"""
parse each of the entities within the KG
"""
with codecs.open(path, "r", encoding="utf8") as f:
jld_corpus = json.load(f)
corpus = jld_corpus["@graph"]
entities = [ self.parse_metadata(e) for e in corpus ]
unknown_journal = None
# providers
for id, kind, title, elem in entities:
if kind == "Provider":
if "dct:identifier" in elem:
ror = elem["dct:identifier"]["@value"]
else:
ror = ""
self.prov[id] = RCNetworkNode(
view={
"id": id,
"title": title,
"ror": ror
},
elem=elem
)
# datasets
for id, kind, title, elem in entities:
if kind == "Dataset":
prov_id = elem["dct:publisher"]["@value"]
# url, if any
if "foaf:page" in elem:
url = elem["foaf:page"]["@value"]
else:
url = None
self.data[id] = RCNetworkNode(
view={
"id": id,
"title": title,
"provider": prov_id,
"url": url
},
elem=elem
)
# journals
for id, kind, title, elem in entities:
if kind == "Journal":
if title == "unknown":
unknown_journal = id
else:
if "dct:identifier" in elem:
issn = elem["dct:identifier"]["@value"]
else:
issn = ""
# url, if any
if "foaf:page" in elem:
url = elem["foaf:page"]["@value"]
else:
url = None
self.jour[id] = RCNetworkNode(
view={
"id": id,
"title": title,
"issn": issn,
"url": url
},
elem=elem
)
# authors
for id, kind, title, elem in entities:
if kind == "Author":
if "dct:identifier" in elem:
orcid = elem["dct:identifier"]["@value"]
else:
orcid = ""
self.auth[id] = RCNetworkNode(
view={
"id": id,
"title": title,
"orcid": orcid
},
elem=elem
)
# topics
for id, kind, title, elem in entities:
if kind == "Topic":
self.topi[id] = RCNetworkNode(
view={
"id": id,
"title": title
},
elem=elem
)
# publications
for id, kind, title, elem in entities:
if kind == "ResearchPublication":
# link the datasets
data_list = []
l = elem["cito:citesAsDataSource"]
# if there's only one element, JSON-LD will link
# directly rather than enclose within a list
if isinstance(l, dict):
l = [l]
for d in l:
data_id = d["@id"].split("#")[1]
self.data[data_id].view["used"] = True
data_list.append(data_id)
prov_id = self.data[data_id].view["provider"]
self.prov[prov_id].view["used"] = True
# link the authors
auth_list = []
if "dct:creator" in elem:
l = elem["dct:creator"]
else:
l = []
# ibid.
if isinstance(l, dict):
l = [l]
for a in l:
auth_id = a["@id"].split("#")[1]
self.auth[auth_id].view["used"] = True
auth_list.append(auth_id)
# link the topics
topi_list = []
if "dct:subject" in elem:
l = elem["dct:subject"]
else:
l = []
# ibid.
if isinstance(l, dict):
l = [l]
for t in l:
topi_id = t["@id"].split("#")[1]
self.topi[topi_id].view["used"] = True
topi_list.append(topi_id)
# add DOI
if "dct:identifier" in elem:
doi = elem["dct:identifier"]["@value"]
else:
doi = ""
# add journal
if "dct:publisher" in elem:
jour_id = elem["dct:publisher"]["@id"].split("#")[1]
if jour_id == unknown_journal:
jour_id = None
else:
self.jour[jour_id].view["used"] = True
# add abstract
if "cito:description" in elem:
abstract = elem["cito:description"]["@value"]
else:
abstract = ""
# open access PDF, if any
if "openAccess" in elem:
pdf = elem["openAccess"]["@value"]
else:
pdf = None
self.publ[id] = RCNetworkNode(
view={
"id": id,
"title": title,
"doi": doi,
"pdf": pdf,
"journal": jour_id,
"abstract": abstract,
"datasets": data_list,
"authors": auth_list,
"topics": topi_list
},
elem=elem
)
######################################################################
## graph analytics
@classmethod
@lru_cache()
def point_estimate (cls, x, n):
return (float(x) + cls.Z_975) / (float(n) + 2.0 * cls.Z_975)
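# Added note: with Z_975 ~ 1.96 this is a smoothed proportion estimate,
# (x + 1.96) / (n + 2 * 1.96), which pulls small-sample proportions toward
# 0.5; e.g. x=3, n=4 gives ~0.63 instead of the raw 0.75.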
def propagate_pdf (self, entity_class, entity_kind):
"""
propagate probability distribution functions across the graph,
for conditional probabilities related to datasets
"""
trials = defaultdict(int)
counts = defaultdict(dict)
for p in self.publ.values():
if p.view[entity_kind]:
coll = p.view[entity_kind]
if isinstance(coll, str):
coll = [coll]
for e in coll:
n = float(len(p.view["datasets"]))
trials[e] += n
for d in p.view["datasets"]:
if d not in counts[e]:
counts[e][d] = 1
else:
counts[e][d] += 1
for e in entity_class.values():
e_id = e.view["id"]
mle = {}
for d, x in counts[e_id].items():
pt_est = self.point_estimate(x, trials[e_id])
mle[self.get_id(d)] = [x, pt_est]
e.view["mle"] = mle
def build_analytics_graph (self):
"""
build a graph to calculate analytics
"""
self.nxg = nx.Graph()
for p in self.prov.values():
if "used" in p.view:
self.nxg.add_node(self.get_id(p.view["id"]))
for d in self.data.values():
if "used" in d.view:
self.nxg.add_node(self.get_id(d.view["id"]))
self.nxg.add_edge(self.get_id(d.view["id"]), self.get_id(d.view["provider"]), weight=10.0)
for a in self.auth.values():
if "used" in a.view:
self.nxg.add_node(self.get_id(a.view["id"]))
for j in self.jour.values():
if "used" in j.view:
self.nxg.add_node(self.get_id(j.view["id"]))
for t in self.topi.values():
if "used" in t.view:
self.nxg.add_node(self.get_id(t.view["id"]))
for p in self.publ.values():
self.nxg.add_node(self.get_id(p.view["id"]))
if p.view["journal"]:
self.nxg.add_edge(self.get_id(p.view["id"]), self.get_id(p.view["journal"]), weight=1.0)
for d in p.view["datasets"]:
self.nxg.add_edge(self.get_id(p.view["id"]), self.get_id(d), weight=20.0)
for a in p.view["authors"]:
self.nxg.add_edge(self.get_id(p.view["id"]), self.get_id(a), weight=20.0)
for t in p.view["topics"]:
self.nxg.add_edge(self.get_id(p.view["id"]), self.get_id(t), weight=10.0)
@classmethod
def calc_quantiles (cls, metrics, num_q):
"""
calculate quantiles for the given list of metrics
"""
bins = np.linspace(0, 1, num=num_q, endpoint=True)
s = pd.Series(metrics)
q = s.quantile(bins, interpolation="nearest")
try:
dig = np.digitize(metrics, q) - 1
except ValueError as e:
print("ValueError:", str(e), metrics, s, q, bins)
sys.exit(-1)
quantiles = []
for idx, q_hi in q.iteritems():
quantiles.append(q_hi)
return quantiles
def scale_ranks (self, scale_factor=3):
"""
run quantile analysis on centrality metrics, assessing the
relative impact of each element in the KG
"""
result = nx.eigenvector_centrality_numpy(self.nxg, weight="weight")
ranks = list(result.values())
quant = self.calc_quantiles(ranks, num_q=10)
num_quant = len(quant)
for id, rank in sorted(result.items(), key=itemgetter(1), reverse=True):
impact = stats.percentileofscore(ranks, rank)
scale = (((impact / num_quant) + 5) * scale_factor)
self.scale[id] = [int(round(scale)), impact / 100.0]
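# Added example (hypothetical numbers, default scale_factor=3): a node at
# the 90th percentile of eigenvector centrality gets
# scale = round(((90 / 10) + 5) * 3) == 42 and a stored impact of 0.90.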
def load_network (self, path):
"""
run the full usage pattern, prior to use of serialize() or
subgraph()
"""
t0 = time.time()
self.parse_corpus(path)
self.propagate_pdf(self.auth, "authors")
self.propagate_pdf(self.jour, "journal")
self.propagate_pdf(self.topi, "topics")
self.build_analytics_graph()
self.scale_ranks()
elapsed_time = (time.time() - t0) * 1000.0
return elapsed_time
######################################################################
## ser/de for pre-computing, then later a fast load/launch
def serialize (self, links, path=Path("precomp.json")):
"""
serialize all of the data structures required to recreate the
knowledge graph
"""
g = nx.readwrite.json_graph.node_link_data(self.nxg),
view = [
g,
links,
self.id_list,
list(self.labels.items()),
list(self.scale.items()),
[ p.view for p in self.prov.values() ],
[ d.view for d in self.data.values() ],
[ p.view for p in self.publ.values() ],
[ j.view for j in self.jour.values() ],
[ a.view for a in self.auth.values() ],
[ t.view for t in self.topi.values() ]
]
with codecs.open(path, "wb", encoding="utf8") as f:
json.dump(view, f, ensure_ascii=False)
def deserialize (self, path=Path("precomp.json")):
"""
deserialize all of the data structures required to recreate
the knowledge graph
"""
with codecs.open(path, "r", encoding="utf8") as f:
view = json.load(f)
g, links, id_list, labels, scale, prov, data, publ, jour, auth, topi = view
# deserialize the graph metadata
self.nxg = nx.readwrite.json_graph.node_link_graph(g[0])
self.id_list = id_list
for k, v in labels:
self.labels[k] = v
for k, v in scale:
self.scale[k] = v
# deserialize each dimension of entities in the KG
for view in prov:
self.prov[view["id"]] = RCNetworkNode(view=view)
for view in data:
self.data[view["id"]] = RCNetworkNode(view=view)
for view in publ:
self.publ[view["id"]] = RCNetworkNode(view=view)
for view in jour:
self.jour[view["id"]] = RCNetworkNode(view=view)
for view in auth:
self.auth[view["id"]] = RCNetworkNode(view=view)
for view in topi:
self.topi[view["id"]] = RCNetworkNode(view=view)
return links
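# Illustrative round trip (added; file names are assumptions):
# net = RCNetwork()
# net.load_network("corpus.jsonld")            # parse + rank, returns elapsed ms
# net.serialize(links={}, path=Path("precomp.json"))
# net2 = RCNetwork()
# links = net2.deserialize(Path("precomp.json"))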
######################################################################
## linked data viewer
@classmethod
def get_template (cls, template_folder, template_path):
"""
load a Jinja2 template
"""
return Environment(loader=FileSystemLoader(template_folder)).get_template(template_path)
@classmethod
def render_template (cls, template, **kwargs):
return html_minify(template.render(kwargs)).replace("  ", " ").replace("> <", "><").replace(" >", ">")
def setup_render (self, template_folder):
self.data_template = self.get_template(template_folder, "links/data.html")
self.prov_template = self.get_template(template_folder, "links/prov.html")
self.publ_template = self.get_template(template_folder, "links/publ.html")
self.jour_template = self.get_template(template_folder, "links/jour.html")
self.auth_template = self.get_template(template_folder, "links/auth.html")
self.topi_template = self.get_template(template_folder, "links/topi.html")
def calc_rank (self, rerank, neighbor, e):
"""
calculate a distance metric to the selected dataset
"""
neighbor_scale, neighbor_impact = self.scale[neighbor]
rank = (0, 0, 0.0, neighbor_impact)
if rerank:
p = self.publ[self.id_list[neighbor]]
if "rank" in p.view:
rank = p.view["rank"]
else:
if rerank in e.view["mle"]:
count, pt_est = e.view["mle"][rerank]
else:
count = 0
pt_est = 0.0
rank = (0, count, pt_est, neighbor_impact)
return rank
def reco_prov (self, p):
"""
recommend ordered links to this provider entity
"""
uuid = None
title = None
rank = None
url = None
ror = None
data_list = None
p_id = self.get_id(p.view["id"])
if p_id in self.scale:
scale, impact = self.scale[p_id]
edges = self.nxg[self.get_id(p.view["id"])]
data_list = []
for neighbor, attr in edges.items():
neighbor_scale, neighbor_impact = self.scale[neighbor]
data_list.append([ neighbor, self.labels[neighbor], neighbor_impact ])
if len(p.view["ror"]) < 1:
ror = None
url = None
else:
ror = p.view["ror"].replace("https://ror.org/", "")
url = p.view["ror"]
uuid = p.view["id"]
title = p.view["title"]
rank = "{:.4f}".format(impact)
data_list = sorted(data_list, key=lambda x: x[2], reverse=True)
return uuid, title, rank, url, ror, data_list
def render_prov (self, p):
"""
render HTML for a provider
"""
html = None
uuid, title, rank, url, ror, data_list = self.reco_prov(p)
if uuid:
html = self.render_template(
self.prov_template,
uuid=uuid,
title=title,
rank=rank,
url=url,
ror=ror,
data_list=data_list
)
return html
def reco_data (self, d):
"""
recommend ordered links to this dataset entity
"""
uuid = None
title = None
rank = None
url = None
provider = None
publ_list = []
d_id = self.get_id(d.view["id"])
if d_id in self.scale:
scale, impact = self.scale[d_id]
edges = self.nxg[self.get_id(d.view["id"])]
publ_list = []
p_id = self.get_id(d.view["provider"])
seen_set = set([ p_id ])
for neighbor, attr in edges.items():
if neighbor not in seen_set:
neighbor_scale, neighbor_impact = self.scale[neighbor]
publ_list.append([ neighbor, self.labels[neighbor], neighbor_impact ])
uuid = d.view["id"]
title = d.view["title"]
rank = "{:.4f}".format(impact)
url = d.view["url"]
provider = [p_id, self.labels[p_id]]
publ_list = sorted(publ_list, key=lambda x: x[2], reverse=True)
return uuid, title, rank, url, provider, publ_list
def render_data (self, d):
"""
render HTML for a dataset
"""
html = None
uuid, title, rank, url, provider, publ_list = self.reco_data(d)
if uuid:
html = self.render_template(
self.data_template,
uuid=uuid,
title=title,
rank=rank,
url=url,
provider=provider,
publ_list=publ_list
)
return html
def reco_auth (self, a, rerank):
"""
recommend ordered links to this author entity
"""
uuid = None
title = None
rank = None
url = None
orcid = None
publ_list = None
a_id = self.get_id(a.view["id"])
if a_id in self.scale:
scale, impact = self.scale[a_id]
edges = self.nxg[self.get_id(a.view["id"])]
publ_list = []
for neighbor, attr in edges.items():
rank = self.calc_rank(rerank, neighbor, a)
publ_list.append([ neighbor, self.labels[neighbor], rank ])
if len(a.view["orcid"]) < 1:
orcid = None
url = None
else:
orcid = a.view["orcid"].replace("https://orcid.org/", "")
url = a.view["orcid"]
uuid = a.view["id"]
title = a.view["title"]
rank = "{:.4f}".format(impact)
publ_list = sorted(publ_list, key=lambda x: x[2], reverse=True)
return uuid, title, rank, url, orcid, publ_list
def render_auth (self, a, rerank=False):
"""
render HTML for an author
"""
html = None
uuid, title, rank, url, orcid, publ_list = self.reco_auth(a, rerank)
if uuid:
html = self.render_template(
self.auth_template,
uuid=uuid,
title=title,
rank=rank,
url=url,
orcid=orcid,
publ_list=publ_list
)
return html
def reco_jour (self, j):
"""
recommend ordered links to this journal entity
"""
uuid = None
title = None
rank = None
url = None
issn = None
publ_list = None
j_id = self.get_id(j.view["id"])
if j_id in self.scale:
scale, impact = self.scale[j_id]
edges = self.nxg[self.get_id(j.view["id"])]
publ_list = []
for neighbor, attr in edges.items():
neighbor_scale, neighbor_impact = self.scale[neighbor]
publ_list.append([ neighbor, self.labels[neighbor], neighbor_scale ])
if len(j.view["issn"]) < 1:
issn = None
else:
issn = j.view["issn"].replace("https://portal.issn.org/resource/ISSN/", "")
if j.view["url"]:
url = j.view["url"]
elif issn:
url = issn
else:
url = None
uuid = j.view["id"]
title = j.view["title"]
rank = "{:.4f}".format(impact)
publ_list = sorted(publ_list, key=lambda x: x[2], reverse=True)
return uuid, title, rank, url, issn, publ_list
def render_jour (self, j):
"""
render HTML for a journal
"""
html = None
uuid, title, rank, url, issn, publ_list = self.reco_jour(j)
if uuid:
html = self.render_template(
self.jour_template,
uuid=uuid,
title=title,
rank=rank,
url=url,
issn=issn,
publ_list=publ_list
)
return html
def reco_topi (self, t):
"""
recommend ordered links to this topic entity
"""
uuid = None
title = None
rank = None
publ_list = None
t_id = self.get_id(t.view["id"])
if t_id in self.scale:
scale, impact = self.scale[t_id]
edges = self.nxg[self.get_id(t.view["id"])]
publ_list = []
for neighbor, attr in edges.items():
neighbor_scale, neighbor_impact = self.scale[neighbor]
publ_list.append([ neighbor, self.labels[neighbor], neighbor_scale ])
uuid = t.view["id"]
title = t.view["title"]
rank = "{:.4f}".format(impact)
publ_list = sorted(publ_list, key=lambda x: x[2], reverse=True)
return uuid, title, rank, publ_list
def render_topi (self, t):
"""
render HTML for a topic
"""
html = None
uuid, title, rank, publ_list = self.reco_topi(t)
if uuid:
html = self.render_template(
self.topi_template,
uuid=uuid,
title=title,
rank=rank,
publ_list=publ_list
)
return html
def reco_publ (self, p):
"""
recommend ordered links to this publication entity
"""
uuid = None
title = None
rank = None
url = None
doi = None
pdf = None
journal = None
abstract = None
auth_list = None
data_list = None
topi_list = None
p_id = self.get_id(p.view["id"])
if p_id in self.scale:
scale, impact = self.scale[p_id]
journal = None
if p.view["journal"]:
j_id = self.get_id(p.view["journal"])
if self.labels[j_id] != "unknown":
journal = [ j_id, self.labels[j_id] ]
auth_list = []
for a in p.view["authors"]:
a_id = self.get_id(a)
# do not sort; preserve the author order
auth_list.append([ a_id, self.labels[a_id] ])
data_list = []
for d in p.view["datasets"]:
d_id = self.get_id(d)
neighbor_scale, neighbor_impact = self.scale[d_id]
data_list.append([ d_id, self.labels[d_id], neighbor_scale ])
topi_list = []
for t in p.view["topics"]:
t_id = self.get_id(t)
neighbor_scale, neighbor_impact = self.scale[t_id]
topi_list.append([ t_id, self.labels[t_id], neighbor_scale ])
if len(p.view["doi"]) < 1:
url = None
doi = None
else:
url = p.view["doi"]
doi = p.view["doi"].replace("https://doi.org/", "")
if "abstract" not in p.view or len(p.view["abstract"]) < 1:
abstract = None
else:
abstract = p.view["abstract"]
uuid = p.view["id"]
title = p.view["title"]
rank = "{:.4f}".format(impact)
pdf = p.view["pdf"]
data_list = sorted(data_list, key=lambda x: x[2], reverse=True)
topi_list = sorted(topi_list, key=lambda x: x[2], reverse=True)
return uuid, title, rank, url, doi, pdf, journal, abstract, auth_list, data_list, topi_list
def render_publ (self, p):
"""
render HTML for a publication
"""
html = None
uuid, title, rank, url, doi, pdf, journal, abstract, auth_list, data_list, topi_list = self.reco_publ(p)
if uuid:
html = self.render_template(
self.publ_template,
uuid=uuid,
title=title,
rank=rank,
url=url,
doi=doi,
pdf=pdf,
journal=journal,
abstract=abstract,
auth_list=auth_list,
data_list=data_list,
topi_list=topi_list
)
return html
def remap_list (self, l):
"""
remap the networkx graph index values to UUIDs
"""
return [ [self.id_list[x[0]], x[1]] for x in l ]
def lookup_entity (self, uuid):
"""
get recommended links for the given entity
"""
response = None
if uuid in self.prov:
uuid, title, rank, url, ror, data_list = self.reco_prov(self.prov[uuid])
response = {
"title": title,
"rank": rank,
"url": url,
"ror": ror,
"data": self.remap_list(data_list)
}
elif uuid in self.data:
uuid, title, rank, url, provider, publ_list = self.reco_data(self.data[uuid])
response = {
"title": title,
"rank": rank,
"url": url,
"prov": [ self.id_list[provider[0]], provider[1] ],
"publ": self.remap_list(publ_list)
}
elif uuid in self.publ:
uuid, title, rank, url, doi, pdf, journal, abstract, auth_list, data_list, topi_list = self.reco_publ(self.publ[uuid])
response = {
"title": title,
"rank": rank,
"url": url,
"doi": doi,
"pdf": pdf,
"abstract": abstract,
"jour": [ self.id_list[journal[0]], journal[1] ],
"auth": self.remap_list(auth_list),
"data": self.remap_list(data_list),
"topi": self.remap_list(topi_list)
}
elif uuid in self.auth:
uuid, title, rank, url, orcid, publ_list = self.reco_auth(self.auth[uuid], rerank=False)
response = {
"title": title,
"rank": rank,
"url": url,
"orcid": orcid,
"publ": self.remap_list(publ_list)
}
elif uuid in self.jour:
uuid, title, rank, url, issn, publ_list = self.reco_jour(self.jour[uuid])
response = {
"title": title,
"rank": rank,
"url": url,
"issn": issn,
"publ": self.remap_list(publ_list)
}
elif uuid in self.topi:
uuid, title, rank, publ_list = self.reco_topi(self.topi[uuid])
response = {
"title": title,
"rank": rank,
"publ": self.remap_list(publ_list)
}
return response
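# Illustrative usage (not part of the original module; the variable name `kg` and the
# example UUID are assumptions). `lookup_entity` returns a plain dict whose keys depend
# on the entity type resolved above:
#
#   hit = kg.lookup_entity("2f7c1a9e-...")
#   if hit:
#       print(hit["title"], hit["rank"])
#       for other_uuid, label in hit.get("publ", []):
#           print("  ->", other_uuid, label)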
def render_links (self):
"""
leverage the `nxg` graph to generate HTML to render links for
each entity in the knowledge graph
"""
links = {}
for p in self.prov.values():
links[p.view["id"]] = self.render_prov(p)
for d in self.data.values():
links[d.view["id"]] = self.render_data(d)
for a in self.auth.values():
links[a.view["id"]] = self.render_auth(a)
for j in self.jour.values():
links[j.view["id"]] = self.render_jour(j)
for t in self.topi.values():
links[t.view["id"]] = self.render_topi(t)
for p in self.publ.values():
links[p.view["id"]] = self.render_publ(p)
return links
def download_links (self, uuid):
"""
download links for the given dataset ID
"""
dataset = self.data[uuid].view["title"]
l = []
for id, node in self.publ.items():
if uuid in node.view["datasets"]:
jour_uuid = node.view["journal"]
if jour_uuid in self.jour:
jour_title = self.jour[jour_uuid].view["title"]
else:
jour_title = ""
l.append([
dataset,
node.view["title"],
jour_title,
node.view["doi"],
node.view["abstract"]
])
df = pd.DataFrame(l, columns=["dataset", "publication", "journal", "url", "abstract"])
import random
import re
import time
from datetime import datetime
import pandas as pd
from requests import Session
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
from requests_futures.sessions import FuturesSession
DEFAULT_TIMEOUT = 5
USER_AGENT_LIST = [
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36",
"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36",
"Mozilla/5.0 (Windows NT 10.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.129 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36",
]
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "en-US,en;q=0.9",
"origin": "https://finance.yahoo.com",
"referer": "https://finance.yahoo.com",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
}
class TimeoutHTTPAdapter(HTTPAdapter):
def __init__(self, *args, **kwargs):
self.timeout = DEFAULT_TIMEOUT
if "timeout" in kwargs:
self.timeout = kwargs["timeout"]
del kwargs["timeout"]
super(TimeoutHTTPAdapter, self).__init__(*args, **kwargs)
def send(self, request, **kwargs):
timeout = kwargs.get("timeout")
if timeout is None:
kwargs["timeout"] = self.timeout
return super(TimeoutHTTPAdapter, self).send(request, **kwargs)
def _init_session(session=None, **kwargs):
session_headers = headers
if session is None:
if kwargs.get("asynchronous"):
session = FuturesSession(max_workers=kwargs.get("max_workers", 8))
else:
session = Session()
if kwargs.get("proxies"):
session.proxies = kwargs.get("proxies")
retries = Retry(
total=kwargs.get("retry", 5),
backoff_factor=kwargs.get("backoff_factor", 0.3),
status_forcelist=kwargs.get("status_forcelist", [429, 500, 502, 503, 504]),
method_whitelist=["HEAD", "GET", "OPTIONS", "POST", "TRACE"],
)
if kwargs.get("verify") is not None:
session.verify = kwargs.get("verify")
session.mount(
"https://",
TimeoutHTTPAdapter(
max_retries=retries, timeout=kwargs.get("timeout", DEFAULT_TIMEOUT)
),
)
# TODO: Figure out how to utilize this within the validate_response
# TODO: This will be a much better way of handling bad requests than
# TODO: what I'm currently doing.
# session.hooks['response'] = \
# [lambda response, *args, **kwargs: response.raise_for_status()]
user_agent = kwargs.get("user_agent", random.choice(USER_AGENT_LIST))
session_headers["User-Agent"] = user_agent
if kwargs.get("headers") and isinstance(kwargs.get("headers"), dict):
session_headers.update(**kwargs.get("headers"))
session.headers.update(**session_headers)
return session
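# Illustrative usage (comment only; the chart URL is just an example): build a
# synchronous session with custom retry/timeout behaviour via the keywords handled above.
#
#   session = _init_session(retry=3, backoff_factor=0.5, timeout=10)
#   resp = session.get("https://query1.finance.yahoo.com/v8/finance/chart/AAPL")
#   resp.raise_for_status()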
# def get_cookies(user_agent):
# options = webdriver.ChromeOptions()
# options.add_argument('--user-agent=' + user_agent)
# options.add_argument('headless')
# driver = webdriver.Chrome(
# ChromeDriverManager().install(), chrome_options=options)
# driver.get("https://finance.yahoo.com/screener/new")
# cookies = driver.get_cookies()
# driver.quit()
# return cookies
def _flatten_list(ls):
return [item for sublist in ls for item in sublist]
def _convert_to_list(symbols, comma_split=False):
if isinstance(symbols, str):
if comma_split:
return [x.strip() for x in symbols.split(",")]
else:
return re.findall(r"[\w\-.=^&]+", symbols)
return symbols
def _convert_to_timestamp(date=None, start=True):
if date is None:
date = int((-858880800 * start) + (time.time() * (not start)))
elif isinstance(date, datetime):
date = int(time.mktime(date.timetuple()))
else:
date = int(time.mktime(time.strptime(str(date), "%Y-%m-%d")))
return date
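# Behaviour of the branches above (illustrative):
#   _convert_to_timestamp("2020-01-01")          -> epoch seconds for that date (local time)
#   _convert_to_timestamp(datetime(2020, 1, 1))  -> same, from a datetime object
#   _convert_to_timestamp(None, start=True)      -> the fixed early default, -858880800
#   _convert_to_timestamp(None, start=False)     -> the current time in epoch seconds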
def _history_dataframe(data, symbol, params, adj_timezone=True):
df = pd.DataFrame(data[symbol]["indicators"]["quote"][0])
if data[symbol]["indicators"].get("adjclose"):
df["adjclose"] = data[symbol]["indicators"]["adjclose"][0]["adjclose"]
df.index = pd.to_datetime(data[symbol]["timestamp"], unit="s")
import pandas as pd
class Query(object):
def __init__(self, sample, name, count, species_list, rank = "species"):
self.sample = sample
self.name = name
self.count = count
self.species_list = species_list
self.rank = rank
def remove_species(self, species_list):
for species in species_list:
if species in self.species_list:
self.species_list.remove(species)
def estimate_count(self):
self.estimated_count = self.count / len(self.species_list)
def get_unique_rank(self, species_taxonomy_map, ranks=['genus', 'family', 'order', 'class', 'phylum', 'superkingdom'], aggregate_rate=0.95):
if len(self.species_list) == 1:
return self.rank, self.species_list[0]
else:
for rank in ranks:
rank_list = [species_taxonomy_map[s][rank] for s in self.species_list]
vc = pd.value_counts(rank_list, sort=True, ascending=False)
ar = vc[0] / len(rank_list)
if ar >= aggregate_rate:
rank_name = vc.keys()[0]
if rank_name == "Unclassified":
continue
return rank, rank_name
raise Exception(f"Cannot find aggregated rank for {'.'.join(self.species_list)}")
def aggregate_to_rank(self, species_taxonomy_map, rank, aggregate_rate=0.95):
if self.rank == rank:
if len(self.species_list) > 1:
return("AmbiguousRanks")
else:
return(self.species_list[0])
rank_list = [species_taxonomy_map[s][rank] for s in self.species_list]
vc = pd.value_counts(rank_list, sort=True, ascending=False)
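# Assumed continuation (the original snippet ends at the value_counts call): mirror
# the acceptance test used in get_unique_rank above -- keep the most common name at
# this rank only if it covers at least `aggregate_rate` of the species, otherwise
# report the group as ambiguous.
ar = vc[0] / len(rank_list)
if ar >= aggregate_rate and vc.keys()[0] != "Unclassified":
    return vc.keys()[0]
return "AmbiguousRanks"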
#Import necessary package
import requests
import re
from bs4 import BeautifulSoup
import json
import html
import pandas as pd
import numpy as np
import datetime as dt
import configparser
import os
#Configure parameter
config = configparser.ConfigParser()
config.read(os.path.join(os.path.dirname(__file__), 'config.ini'))
mall = config['general']['mall']
shoplisturl = config['url']['shoplisturl']
fnblisturl = config['url']['fnblisturl']
shoplistapi = config['api']['shoplistapi']
shoplisttcapi = config['api']['shoplisttcapi']
fnblistapi = config['api']['fnblistapi']
fnblisttcapi = config['api']['fnblisttcapi']
#Get shop category data and export into csv
def getShopCategory():
#Create empty DataFrame for shop category
shopcategory = pd.DataFrame()
#Get shop category
type = 'Shopping'
url = shoplisturl
page = requests.get(url)
soup = BeautifulSoup(page.content, 'html.parser')
for listing_category in soup.find_all('div', class_= 'listing__category', attrs = {'data-tab':'category'}):
for category in listing_category.find_all('button', class_ = 'button'):
try:
shop_category_id = category.get('data-filter')
except:
shop_category_id = np.nan
try:
shop_category_name = category.text
except:
shop_category_name = np.nan
shopcategory = shopcategory.append(
{
'type':type,
'shop_category_id':shop_category_id,
'shop_category_name':shop_category_name
}, ignore_index=True
)
type = 'Dining'
url = fnblisturl
page = requests.get(url)
soup = BeautifulSoup(page.content, 'html.parser')
for listing_category in soup.find_all('div', class_= 'select-box'):
for category in listing_category.find_all('option'):
try:
shop_category_id = category.get('value')
except:
shop_category_id = np.nan
try:
shop_category_name = category.text
except:
shop_category_name = np.nan
shopcategory = shopcategory.append(
{
'type':type,
'shop_category_id':shop_category_id,
'shop_category_name':shop_category_name
}, ignore_index=True
)
shopcategory['update_date'] = dt.date.today()
shopcategory['mall'] = mall
shopcategory.drop(shopcategory[shopcategory.shop_category_id == 'all'].index, inplace = True)
shopcategory.drop(shopcategory[shopcategory.shop_category_id == 'All'].index, inplace = True)
shopcategory = shopcategory.loc[:, ['mall','type','shop_category_id','shop_category_name','update_date']]
return shopcategory
#Get shop master data and export into csv
def getShopMaster():
#Create empty DataFrame for shop master
shoplist = pd.DataFrame()
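#Assumed sketch of the continuation (the original snippet ends after initialising the
#DataFrame). The shop-list API configured above would be queried and each record
#appended; the response field names below are assumptions, not the real API schema.
# response = requests.get(shoplistapi).json()
# for shop in response.get('shops', []):
#     shoplist = shoplist.append({'shop_id': shop.get('id'),
#                                 'shop_name': shop.get('name')}, ignore_index=True)
# shoplist['update_date'] = dt.date.today()
# shoplist['mall'] = mall
# return shoplist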
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
import numpy as np
import pandas as pd
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 11 Oct 2018
# Function: batsman4s
# This function plots the number of 4s vs the runs scored in the innings by the batsman
#
###########################################################################################
def batsman4s(file, name="A Hookshot"):
'''
Plot the numbers of 4s against the runs scored by batsman
Description
This function plots the number of 4s against the total runs scored by batsman. A 2nd order polynomial regression curve is also plotted. The predicted number of 4s for 50 runs and 100 runs scored is also plotted
Usage
batsman4s(file, name="A Hookshot")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsman6s
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
tendulkar = getPlayerData(35320,dir="../",file="tendulkar.csv",type="batting")
homeOrAway=[1,2],result=[1,2,4]
batsman4s("tendulkar.csv", "<NAME>")
'''
# Clean the batsman file and create a complete data frame
df = clean(file)
df['Runs'] = pd.to_numeric(df['Runs'])
df['4s'] = pd.to_numeric(df['4s'])
df1 = df[['Runs','4s']].sort_values(by=['Runs'])
# Set figure size
rcParams['figure.figsize'] = 10,6
# Get number of 4s and runs scored
runs = pd.to_numeric(df1['Runs'])
x4s = pd.to_numeric(df1['4s'])
atitle = name + "-" + "Runs scored vs No of 4s"
# Plot no of 4s and a 2nd order curve fit
plt.scatter(runs, x4s, alpha=0.5)
plt.xlabel('Runs')
plt.ylabel('4s')
plt.title(atitle)
# Create a polynomial of degree 2
poly = PolynomialFeatures(degree=2)
runsPoly = poly.fit_transform(runs.values.reshape(-1,1))
linreg = LinearRegression().fit(runsPoly,x4s)
plt.plot(runs,linreg.predict(runsPoly),'-r')
# Predict the number of 4s for 50 runs
b=poly.fit_transform(np.array([[50]]))
c=linreg.predict(b)
plt.axhline(y=c, color='b', linestyle=':')
plt.axvline(x=50, color='b', linestyle=':')
# Predict the number of 4s for 100 runs
b=poly.fit_transform(np.array([[100]]))
c=linreg.predict(b)
plt.axhline(y=c, color='b', linestyle=':')
plt.axvline(x=100, color='b', linestyle=':')
plt.text(180, 0.5,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import matplotlib.pyplot as plt
import seaborn as sns
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 13 Oct 2018
# Function: batsman6s
# This function plots the number of 6s vs the runs scored in the innings by the batsman
#
###########################################################################################
def batsman6s(file, name="A Hookshot") :
'''
Description
Compute and plot the number of 6s in the total runs scored by batsman
Usage
batsman6s(file, name="A Hookshot")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
# tendulkar = getPlayerData(35320,file="tendulkar.csv",type="batting", homeOrAway=c(1,2),result=c(1,2,4))
# batsman6s("tendulkar.csv","<NAME>")
'''
x6s = []
# Set figure size
rcParams['figure.figsize'] = 10,6
# Clean the batsman file and create a complete data frame
df = clean (file)
# Remove all rows where 6s are 0
a= df['6s'] !=0
b= df[a]
x6s=b['6s'].astype(int)
runs=pd.to_numeric(b['Runs'])
# Plot the 6s as a boxplot
atitle =name + "-" + "Runs scored vs No of 6s"
df1=pd.concat([runs,x6s],axis=1)
fig = sns.boxplot(x="6s", y="Runs", data=df1)
plt.title(atitle)
plt.text(2.2, 10,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import matplotlib.pyplot as plt
import seaborn as sns
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 14 Oct 2018
# Function: batsmanAvgRunsGround
# This function plots the average runs scored by batsman at the ground. The xlabels indicate
# the number of innings at ground
#
###########################################################################################
def batsmanAvgRunsGround(file, name="A Latecut"):
'''
Description
This function computed the Average Runs scored on different pitches and also indicates the number of innings played at these venues
Usage
batsmanAvgRunsGround(file, name = "A Latecut")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanDismissals, batsmanMovingAverage, batsmanPerfBoxHist
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
##tendulkar = getPlayerData(35320,file="tendulkar.csv",type="batting", homeOrAway=[1,2],result=[1,2,4])
batsmanAvgRunsGround("tendulkar.csv","<NAME>")
'''
batsman = clean(file)
rcParams['figure.figsize'] = 10,6
batsman['Runs']=pd.to_numeric(batsman['Runs'])
# Aggregate as sum, mean and count
df=batsman[['Runs','Ground']].groupby('Ground').agg(['sum','mean','count'])
#Flatten multi-levels to column names
df.columns= ['_'.join(col).strip() for col in df.columns.values]
# Reset index
df1=df.reset_index(inplace=False)
atitle = name + "'s Average Runs at Ground"
plt.xticks(rotation="vertical",fontsize=8)
plt.axhline(y=50, color='b', linestyle=':')
plt.axhline(y=100, color='r', linestyle=':')
ax=sns.barplot(x='Ground', y="Runs_mean", data=df1)
plt.title(atitle)
plt.text(30, 180,'Data source-Courtesy:ESPN Cricinfo',\
horizontalalignment='center',\
verticalalignment='center',\
)
plt.show()
plt.gcf().clear()
return
import matplotlib.pyplot as plt
import seaborn as sns
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 14 Oct 2018
# Function: batsmanAvgRunsOpposition
# This function plots the average runs scored by batsman versus the opposition. The xlabels indicate
# the Opposition and the number of innings at ground
#
###########################################################################################
def batsmanAvgRunsOpposition(file, name="A Latecut"):
'''
This function computes and plots the Average runs against different opposition played by batsman
Description
This function computes the mean runs scored by batsman against different opposition
Usage
batsmanAvgRunsOpposition(file, name = "A Latecut")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanDismissals, batsmanMovingAverage, batsmanPerfBoxHist batsmanAvgRunsGround
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
#tendulkar = getPlayerData(35320,file="tendulkar.csv",type="batting", homeOrAway=[1,2],result=[1,2,4])
batsmanAvgRunsOpposition("tendulkar.csv","<NAME>")
'''
batsman = clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
batsman['Runs']=pd.to_numeric(batsman['Runs'])
# Aggregate as sum, mean and count
df=batsman[['Runs','Opposition']].groupby('Opposition').agg(['sum','mean','count'])
#Flatten multi-levels to column names
df.columns= ['_'.join(col).strip() for col in df.columns.values]
# Reset index
df1=df.reset_index(inplace=False)
atitle = name + "'s Average Runs vs Opposition"
plt.xticks(rotation="vertical",fontsize=8)
ax=sns.barplot(x='Opposition', y="Runs_mean", data=df1)
plt.axhline(y=50, color='b', linestyle=':')
plt.title(atitle)
plt.text(5, 50, 'Data source-Courtesy:ESPN Cricinfo',\
horizontalalignment='center',\
verticalalignment='center',\
)
plt.show()
plt.gcf().clear()
return
import matplotlib.pyplot as plt
import seaborn as sns
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 19 Oct 2018
# Function: batsmanContributionWonLost
# This plots the batsman's contribution to won and lost matches
#
###########################################################################################
def batsmanContributionWonLost(file,name="A Hitter"):
'''
Display the batsman's contribution in matches that were won and those that were lost
Description
Plot the comparative contribution of the batsman in matches that were won and lost as box plots
Usage
batsmanContributionWonLost(file, name = "A Hitter")
Arguments
file
CSV file of batsman from ESPN Cricinfo obtained with getPlayerDataSp()
name
Name of the batsman
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanMovingAverage batsmanRunsPredict batsmanPerfBoxHist
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
#tendulkarsp = getPlayerDataSp(35320,".","tendulkarsp.csv","batting")
batsmanContributionWonLost("tendulkarsp.csv","<NAME>")
'''
playersp = clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
# Create a column based on result
won = playersp[playersp['result'] == 1]
lost = playersp[(playersp['result']==2) | (playersp['result']==4)]
won['status']="won"
lost['status']="lost"
# Stack dataframes
df= pd.concat([won,lost])
df['Runs']= pd.to_numeric(df['Runs'])
ax = sns.boxplot(x='status',y='Runs',data=df)
atitle = name + "-" + "- Runs in games won/lost-drawn"
plt.title(atitle)
plt.text(0.5, 200,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import matplotlib.pyplot as plt
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 17 Oct 2018
# Function: batsmanCumulativeAverageRuns
# This function computes and plots the cumulative average runs by a batsman
#
###########################################################################################
def batsmanCumulativeAverageRuns(file,name="A Leg Glance"):
'''
Batsman's cumulative average runs
Description
This function computes and plots the cumulative average runs of a batsman
Usage
batsmanCumulativeAverageRuns(file,name= "A Leg Glance")
Arguments
file
Data frame
name
Name of batsman
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanCumulativeStrikeRate bowlerCumulativeAvgEconRate bowlerCumulativeAvgWickets
Examples
batsmanCumulativeAverageRuns("tendulkar.csv", "<NAME>")
'''
batsman= clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
runs=pd.to_numeric(batsman['Runs'])
# Compute cumulative average
cumAvg = runs.cumsum()/pd.Series(np.arange(1, len(runs)+1), runs.index)
atitle = name + "- Cumulative Average vs No of innings"
plt.plot(cumAvg)
plt.xlabel('Innings')
plt.ylabel('Cumulative average')
plt.title(atitle)
plt.text(200,20,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import matplotlib.pyplot as plt
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 17 Oct 2018
# Function: batsmanCumulativeStrikeRate
# This function computes and plots the cumulative average strike rate of a batsman
#
###########################################################################################
def batsmanCumulativeStrikeRate(file,name="A Leg Glance"):
'''
Batsman's cumulative average strike rate
Description
This function computes and plots the cumulative average strike rate of a batsman
Usage
batsmanCumulativeStrikeRate(file,name= "A Leg Glance")
Arguments
file
Data frame
name
Name of batsman
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanCumulativeAverageRuns bowlerCumulativeAvgEconRate bowlerCumulativeAvgWickets
Examples
## Not run:
batsmanCumulativeStrikeRate("tendulkar.csv", "<NAME>")
'''
batsman= clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
strikeRate=pd.to_numeric(batsman['SR'])
# Compute cumulative strike rate
cumStrikeRate = strikeRate.cumsum()/pd.Series(np.arange(1, len(strikeRate)+1), strikeRate.index)
atitle = name + "- Cumulative Strike rate vs No of innings"
plt.xlabel('Innings')
plt.ylabel('Cumulative Strike Rate')
plt.title(atitle)
plt.plot(cumStrikeRate)
plt.text(200,60,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import matplotlib.pyplot as plt
import seaborn as sns
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 13 Oct 2018
# Function: batsman6s
# This function plots the batsman dismissals
#
###########################################################################################
def batsmanDismissals(file, name="A Squarecut"):
'''
Display a 3D Pie Chart of the dismissals of the batsman
Description
Display the dismissals of the batsman (caught, bowled, hit wicket etc) as percentages
Usage
batsmanDismissals(file, name="A Squarecut")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanMeanStrikeRate, batsmanMovingAverage, batsmanPerfBoxHist
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
#tendulkar= getPlayerData(35320,file="tendulkar.csv",type="batting", homeOrAway=c(1,2),result=c(1,2,4))
batsmanDismissals("tendulkar.csv","<NAME>")
'''
batsman = clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
d = batsman['Dismissal']
# Convert to data frame
df = pd.DataFrame(d)
df1=df['Dismissal'].groupby(df['Dismissal']).count()
df2 = pd.DataFrame(df1)
df2.columns=['Count']
df3=df2.reset_index(inplace=False)
# Plot a pie chart
plt.pie(df3['Count'], labels=df3['Dismissal'],autopct='%.1f%%')
atitle = name + "-Pie chart of dismissals"
plt.suptitle(atitle, fontsize=16)
plt.show()
plt.gcf().clear()
return
import numpy as np
import matplotlib.pyplot as plt
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 28 Aug 2019
# Function: batsmanMeanStrikeRate
# This function plot the Mean Strike Rate of the batsman against Runs scored as a continous graph
#
###########################################################################################
def batsmanMeanStrikeRate(file, name="A Hitter"):
'''
Calculate and plot the Mean Strike Rate of the batsman on total runs scored
Description
This function calculates the Mean Strike Rate of the batsman for each interval of runs scored
Usage
batsmanMeanStrikeRate(file, name = "A Hitter")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanDismissals, batsmanMovingAverage, batsmanPerfBoxHist batsmanPerfBoxHist
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
#tendulkar <- getPlayerData(35320,file="tendulkar.csv",type="batting", homeOrAway=c(1,2),result=c(1,2,4))
batsmanMeanStrikeRate("tendulkar.csv","<NAME>")
'''
batsman = clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
runs= pd.to_numeric(batsman['Runs'])
# Create the histogram
hist, bins = np.histogram(runs, bins = 20)
midBin=[]
SR=[]
# Loop through
for i in range(1,len(bins)):
# Find the mean of the bins (Runs)
midBin.append(np.mean([bins[i-1],bins[i]]))
# Filter runs that are are between 2 bins
batsman['Runs']=pd.to_numeric(batsman['Runs'])
a=(batsman['Runs'] > bins[i-1]) & (batsman['Runs'] <= bins[i])
df=batsman[a]
SR.append(np.mean(pd.to_numeric(df['SR']))) # Changed 28-8-2019
atitle = name + "-" + "Strike rate in run ranges"
# Plot no of 4s and a 2nd order curve fit
plt.scatter(midBin, SR, alpha=0.5)
plt.plot(midBin, SR,color="r", alpha=0.5)
plt.xlabel('Runs')
plt.ylabel('Strike Rate')
plt.title(atitle)
plt.text(180, 50,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import matplotlib.pyplot as plt
import numpy as np
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 17 Oct 2018
# Function: batsmanMovingAverage
# This function computes and plots the Moving Average of the batsman across his career
#
###########################################################################################
# Compute a moving average
def movingaverage(interval, window_size):
window= np.ones(int(window_size))/float(window_size)
return np.convolve(interval, window, 'same')
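# Example (illustrative): with a window of 3, movingaverage([1, 2, 3, 4, 5], 3)
# returns [1.0, 2.0, 3.0, 4.0, 3.0]; the end values are damped because mode='same'
# zero-pads the edges of the series.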
def batsmanMovingAverage(file,name="A Squarecut") :
'''
Calculate and plot the Moving Average of the batsman in his career
Description
This function calculates and plots the Moving Average of the batsman in his career
Usage
batsmanMovingAverage(file,name="A Squarecut")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanDismissals, batsmanMeanStrikeRate, batsmanPerfBoxHist
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
#tendulkar <- getPlayerData(35320,file="tendulkar.csv",type="batting", homeOrAway=c(1,2),result=c(1,2,4))
batsmanMovingAverage("tendulkar.csv","<NAME>")
'''
# Compute the moving average of the time series
batsman = clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
runs=pd.to_numeric(batsman['Runs'])
date= pd.to_datetime(batsman['Start Date'])
atitle = name + "'s Moving average (Runs)"
# Plot the runs in grey colour
plt.plot(date,runs,"-",color = '0.75')
# Compute and plot moving average
y_av = movingaverage(runs, 50)
plt.xlabel('Date')
plt.ylabel('Runs')
plt.plot(date, y_av,"b")
plt.title(atitle)
plt.text('2002-01-03',150,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import matplotlib.pyplot as plt
import seaborn as sns
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 14 Oct 2018
# Function: batsmanPerfBoxHist
# This function makes a box plot showing the mean, median and the 25th & 75th percentile runs. The
# histogram shows the frequency of scoring runs in different run ranges
#
###########################################################################################
# Plot the batting performance as a combined box plot and histogram
def batsmanPerfBoxHist(file, name="A Hitter"):
'''
Make a boxplot and a histogram of the runs scored by the batsman
Description
Make a boxplot and histogram of the runs scored by the batsman. Plot the Mean, Median, 25th and 75th quantile
Usage
batsmanPerfBoxHist(file, name="A Hitter")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanDismissals, batsmanMeanStrikeRate, batsmanMovingAverage, batsmanPerfBoxHist
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
#tendulkar = getPlayerData(35320,file="tendulkar.csv",type="batting", homeOrAway=c(1,2),result=c(1,2,4))
batsman4s("tendulkar.csv","<NAME>")
'''
batsman = clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
batsman['Runs']=pd.to_numeric(batsman['Runs'])
plt.subplot(2,1,1)
sns.boxplot(batsman['Runs'])
plt.subplot(2,1,2);
atitle = name + "'s" + " - Runs Frequency vs Runs"
plt.hist(batsman['Runs'],bins=20, edgecolor='black')
plt.xlabel('Runs')
plt.ylabel('Frequency')
plt.title(atitle,size=16)
plt.text(180, 70,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
from statsmodels.tsa.arima_model import ARIMA
import pandas as pd
import numpy as np
from statsmodels.tsa.seasonal import seasonal_decompose
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 20 Oct 2018
# Function: batsmanPerfForecast
# This function forecasts the batsmans performance based on past performance -
# To update
###########################################################################################
def batsmanPerfForecast(file, name="A Squarecut"):
'''
# To do: Currently ARIMA is used.
Forecast the batting performance based on past performances using Holt-Winters forecasting
Description
This function forecasts the performance of the batsman based on past performances using HoltWinters forecasting model
Usage
batsmanPerfForecast(file, name="A Squarecut")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanDismissals, batsmanMeanStrikeRate, batsmanMovingAverage, batsmanPerfBoxHist
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
#tendulkar = getPlayerData(35320,file="tendulkar.csv",type="batting", homeOrAway=c(1,2),result=c(1,2,4))
batsmanPerfForecast("tendulkar.csv","<NAME>")
'''
batsman= clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
runs=batsman['Runs'].astype('float')
# Fit a ARIMA model
date= pd.to_datetime(batsman['Start Date'])
df=pd.DataFrame({'date':date,'runs':runs})
df1=df.set_index('date')
model = ARIMA(df1, order=(5,1,0))
model_fit = model.fit(disp=0)
print(model_fit.summary())
# plot residual errors
residuals = pd.DataFrame(model_fit.resid)
residuals.plot()
plt.show()
residuals.plot(kind='kde')
plt.show()
plt.gcf().clear()
print(residuals.describe())
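# Possible Holt-Winters variant (sketch only, not part of the original): the docstring
# above mentions Holt-Winters forecasting, which statsmodels exposes as ExponentialSmoothing.
#
#   from statsmodels.tsa.holtwinters import ExponentialSmoothing
#   hw_model = ExponentialSmoothing(df1, trend='add').fit()
#   forecast = hw_model.forecast(10)   # a forecast horizon of 10 innings is illustrative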
import matplotlib.pyplot as plt
import seaborn as sns
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 19 Oct 2018
# Function: batsmanPerfHomeAway
# This plots the batsman's performance in home versus abroad
#
###########################################################################################
def batsmanPerfHomeAway(file,name="A Hitter"):
'''
This function analyses the performance of the batsman at home and overseas
Description
This function plots the runs scored by the batsman at home and overseas
Usage
batsmanPerfHomeAway(file, name = "A Hitter")
Arguments
file
CSV file of batsman from ESPN Cricinfo obtained with getPlayerDataSp()
name
Name of the batsman
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanMovingAverage batsmanRunsPredict batsmanPerfBoxHist bowlerContributionWonLost
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
#tendulkarSp <-getPlayerDataSp(35320,".","tendulkarsp.csv","batting")
batsmanPerfHomeAway("tendulkarsp.csv","<NAME>")
'''
playersp = clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
# Create separate DFs for home and away
home = playersp[playersp['ha'] == 1]
away = playersp[playersp['ha']==2]
home['venue']="Home"
away['venue']="Overseas"
df= pd.concat([home,away])
df['Runs']= pd.to_numeric(df['Runs'])
atitle = name + "-" + "- - Runs-Home & overseas"
ax = sns.boxplot(x='venue',y='Runs',data=df)
plt.title(atitle)
plt.text(0.5, 200,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import numpy as np
import matplotlib.pyplot as plt
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 30 Jun 2015
# Function: batsmanRunsFreqPerf
# This function computes and plots the Moving Average of the batsman across his career
#
###########################################################################################
# Plot the performance of the batsman as a continous graph
# Create a performance plot between Runs and RunsFrequency
def batsmanRunsFreqPerf(file, name="A Hookshot"):
'''
Calculate and run frequencies in ranges of 10 runs and plot versus Runs the performance of the batsman
Description
This function calculates frequencies of runs in 10 run buckets and plots this percentage
Usage
batsmanRunsFreqPerf(file, name="A Hookshot")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanDismissals, batsmanMovingAverage, batsmanPerfBoxHist
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
#tendulkar <- getPlayerData(35320,file="tendulkar.csv",type="batting", homeOrAway=c(1,2),result=c(1,2,4))
batsmanRunsFreqPerf("tendulkar.csv","<NAME>")
'''
df = clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
runs = pd.to_numeric(df['Runs'])
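# Assumed continuation (the original snippet ends after the conversion above): bucket
# the runs into 10-run ranges and plot the frequency of each bucket, as described in
# the docstring.
hist, bins = np.histogram(runs, bins=np.arange(0, runs.max() + 10, 10))
midpoints = (bins[:-1] + bins[1:]) / 2
atitle = name + "'s" + " Runs frequency vs Runs"
plt.plot(midpoints, hist, color="blue")
plt.xlabel('Runs')
plt.ylabel('Runs frequency')
plt.title(atitle)
plt.text(180, 90, 'Data source-Courtesy:ESPN Cricinfo',
         horizontalalignment='center', verticalalignment='center')
plt.show()
plt.gcf().clear()
return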
import argparse
import os
import warnings
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.compose import make_column_transformer
from sklearn.preprocessing import KBinsDiscretizer, StandardScaler, OneHotEncoder
from sklearn.exceptions import DataConversionWarning
warnings.filterwarnings(action='ignore', category=DataConversionWarning)
columns = ['age', 'education', 'major industry code', 'class of worker', 'num persons worked for employer',
'capital gains', 'capital losses', 'dividends from stocks', 'income']
class_labels = [' - 50000.', ' 50000+.']
def print_shape(df):
negative_examples, positive_examples = np.bincount(df['income'])
print('Data shape: {}, {} positive examples, {} negative examples'.format(df.shape, positive_examples, negative_examples))
if __name__=='__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--train-test-split-ratio', type=float, default=0.2)
args, _ = parser.parse_known_args()
print('Received arguments {}'.format(args))
input_data_path = os.path.join('/opt/ml/processing/input', 'census-income.csv')
print('Reading input data from {}'.format(input_data_path))
df = pd.read_csv(input_data_path)
df = pd.DataFrame(data=df, columns=columns)
df.dropna(inplace=True)
df.drop_duplicates(inplace=True)
df.replace(class_labels, [0, 1], inplace=True)
negative_examples, positive_examples = np.bincount(df['income'])
print('Data after cleaning: {}, {} positive examples, {} negative examples'.format(df.shape, positive_examples, negative_examples))
split_ratio = args.train_test_split_ratio
print('Splitting data into train and test sets with ratio {}'.format(split_ratio))
X_train, X_test, y_train, y_test = train_test_split(df.drop('income', axis=1), df['income'], test_size=split_ratio, random_state=0)
# TODO: Split again for validation
preprocess = make_column_transformer(
(['age', 'num persons worked for employer'], KBinsDiscretizer(encode='onehot-dense', n_bins=10)),
(['capital gains', 'capital losses', 'dividends from stocks'], StandardScaler()),
(['education', 'major industry code', 'class of worker'], OneHotEncoder(sparse=False))
)
print('Running preprocessing and feature engineering transformations')
train_features = preprocess.fit_transform(X_train)
# validation_features = preprocess.fit_transform(X_validation)
test_features = preprocess.transform(X_test)
print('Train data shape after preprocessing: {}'.format(train_features.shape))
# print('Validation data shape after preprocessing: {}'.format(validation_features.shape))
print('Test data shape after preprocessing: {}'.format(test_features.shape))
train_features_output_path = os.path.join('/opt/ml/processing/train', 'train_features.csv')
train_labels_output_path = os.path.join('/opt/ml/processing/train', 'train_labels.csv')
# validation_features_output_path = os.path.join('/opt/ml/processing/validation', 'validation_features.csv')
# validation_labels_output_path = os.path.join('/opt/ml/processing/validation', 'validation_labels.csv')
test_features_output_path = os.path.join('/opt/ml/processing/test', 'test_features.csv')
test_labels_output_path = os.path.join('/opt/ml/processing/test', 'test_labels.csv')
print('Saving training features to {}'.format(train_features_output_path))
pd.DataFrame(train_features).to_csv(train_features_output_path, header=False, index=False)
# print('Saving validation features to {}'.format(validation_features_output_path))
# pd.DataFrame(validation_features).to_csv(validation_features_output_path, header=False, index=False)
print('Saving test features to {}'.format(test_features_output_path))
pd.DataFrame(test_features).to_csv(test_features_output_path, header=False, index=False)
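# Assumed continuation: the label output paths are defined above but otherwise unused,
# so persist the labels alongside the engineered features.
print('Saving training labels to {}'.format(train_labels_output_path))
y_train.to_csv(train_labels_output_path, header=False, index=False)
print('Saving test labels to {}'.format(test_labels_output_path))
y_test.to_csv(test_labels_output_path, header=False, index=False)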
import ast
import re
from datetime import datetime
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import wandb
color_list = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b', '#e377c2', '#7f7f7f', '#bcbd22', '#17becf']
def retrieve_values_from_name(fname):
return re.findall(r"[-+]?[.]?[\d]+(?:,\d\d\d)*[\.]?\d*(?:[eE][-+]?\d+)?", fname)
def download_from_wandb(resdir):
project = 'SYK4Model'
# target_cfgs = {
# 'config.lr': 0.05,
# 'config.scheduler_name': 'constant',
# 'config.seed_SYK': 1
# }
print(f'Downloading experiment results from {project}')
print(f'| Results directory : {resdir}')
# print(f'| Target constraints: {target_cfgs}')
api = wandb.Api()
# runs = api.runs(project, filters=target_cfgs)
run_ids = TARGET_RUN_IDS.split('\n')
records = []
visited = set()
for run_id in run_ids:
if run_id in visited:
raise ValueError(f'There is a duplicated run id {run_id}.')
run = api.run(f'vqc-quantum/{project}/{run_id.strip()}')
visited.add(run_id)
if run.state == 'finished':
print(run.name)
if 'eigenvalues' not in run.config:
print(f'| Skip this run because eigenvalues info is not in config.')
continue
history = run.history()
eigvals_str = run.config['eigenvalues'].replace('\n', '')
eigvals_str = re.sub(' +', ',', eigvals_str)
try:
ground_state_energy = ast.literal_eval(eigvals_str)[0]
except ValueError as e:
print(str(e))
print(f'Parsing Error: eigvals_str: {eigvals_str}')
print(f'Retry to parse the first element')
# Some runs logs eigenvalues in the following format.
# [-5.69803132e-02+0.00000000e+00j ... 1.10259914e-16-4.19720017e-16j]
# Due to dots let us parse the first element and then get its real part.
v_str = eigvals_str.split(',')[0].strip('[')
print(f' - Retried string: {v_str}')
ground_state_energy = ast.literal_eval(v_str).real
best_step = history.loss.argmin()
min_energy_gap = np.abs(history.loss[best_step] - ground_state_energy) # |E(\theta) - E0|
fidelity = history['fidelity/ground'][best_step]
if run.config["n_qubits"] % 4 == 2: # SYK4 is degenerated.
fidelity += history['fidelity/next_to_ground'][best_step]
loss_threshold = 1e-4
hitting_time = float('inf')
for i, row in history.iterrows():
if np.abs(row['loss'] - ground_state_energy) < loss_threshold:
hitting_time = i
break
records.append(
dict(
n_qubits=run.config['n_qubits'],
n_layers=run.config['n_layers'],
min_energy_gap=min_energy_gap,
fidelity=fidelity,
hitting_time=hitting_time
)
)
print(records[-1])
df = pd.DataFrame.from_records(records)
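# Assumed continuation (sketch): tidy and persist the collected records so the plots
# can be rebuilt without re-downloading from wandb; the output file name is an assumption.
df = df.sort_values(['n_qubits', 'n_layers']).reset_index(drop=True)
df.to_csv(Path(resdir) / 'syk4_results.csv', index=False)
return df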
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.4.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Script is used to determine any potential sites that may be using uploading erroneous measurements. Sites may have 'outlier' values beacuse (running list):
# - They may be using a unit_concept_id that does not have a correspondining 'conversion' in '[unit_mapping.csv](https://github.com/all-of-us/curation/blob/develop/data_steward/resource_files/unit_mapping.csv)'.
from google.cloud import bigquery
# %reload_ext google.cloud.bigquery
client = bigquery.Client()
# %load_ext google.cloud.bigquery
# +
from notebooks import parameters
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import six
import scipy.stats
import pandas as pd
# -
measurement_ancestors = [
# lipids
40782589,
40795800,
40772572
# #cbc
# 40789356, 40789120, 40789179, 40772748,
# 40782735, 40789182, 40786033, 40779159
# #cbc w diff
# 40785788, 40785796, 40779195, 40795733,
# 40795725, 40772531, 40779190, 40785793,
# 40779191, 40782561, 40789266
#cmp
# 3049187, 3053283, 40775801, 40779224,
# 40782562, 40782579, 40785850, 40785861,
# 40785869, 40789180, 40789190, 40789527,
# 40791227, 40792413, 40792440, 40795730,
# 40795740, 40795754
#physical measurement
# 40654163,
# 40655804,
# 40654162,
# 40655805,
# 40654167,
# 40654164
]
DATASET = parameters.LATEST_DATASET
print("""
DATASET TO USE: {}
""".format(DATASET))
def find_descendants(DATASET, ancestor_concept):
"""
Function is used to find the descendants of a particular ancestor concept ID using
Bigquery.
This function then creates a long string of said 'descendant' concepts so it can
be used in future queries.
Parameters
----------
DATASET (string): string representing the dataset to be queried. Taken from the
parameters file
ancestor_concept (integer): integer that is the 'ancestor_concept_id' for a particular
set of labs
Returns
-------
string_desc_concepts(string): string of all the descendant concept IDs that
represent the concept_ids for the particular measurement set
"""
descendant_concepts = """
SELECT
DISTINCT
m.measurement_concept_id
FROM
`{}.unioned_ehr_measurement` m
LEFT JOIN
`{}.concept_ancestor` ca
ON
m.measurement_concept_id = ca.descendant_concept_id
WHERE
ca.ancestor_concept_id IN ({})
GROUP BY 1""".format(DATASET, DATASET, ancestor_concept)
print(descendant_concepts)
desc_concepts_df = pd.io.gbq.read_gbq(descendant_concepts,
dialect='standard')
print('success!')
descendant_concept_ids = desc_concepts_df['measurement_concept_id'].tolist()
string_desc_concepts = "("
num_descs = len(descendant_concept_ids)
for idx, concept_id in enumerate(descendant_concept_ids):
string_desc_concepts += str(concept_id)
if idx < num_descs - 1:
string_desc_concepts += ", "
else:
string_desc_concepts += ")"
return string_desc_concepts
def find_total_number_of_units_for_lab_type(DATASET, string_desc_concepts):
"""
Function is used to find the total number of records that have a unit_concept_id
for the 'cluster' of measurement concept IDs that represent a particular lab
type. The unit_concept_id must be:
a. non-null
b. not 0
Parameters
----------
DATASET (string): string representing the dataset to be queried. Taken from the
parameters file
string_desc_concepts(string): string of all the descendant concept IDs that
represent the concept_ids for the particular measurement set
Returns
-------
tot_units (int): represents the total number of records for the particular
measurement set that have a unit_concept ID
"""
total_unit_concept_names = """
SELECT SUM(a.count) as tot_concepts
FROM
(SELECT
DISTINCT
c.concept_name as unit_name, c.standard_concept, COUNT(*) as count
FROM
`{}.unioned_ehr_measurement` m
LEFT JOIN
`{}.concept` c
ON
m.unit_concept_id = c.concept_id
WHERE
m.measurement_concept_id IN {}
AND
m.unit_concept_id IS NOT NULL
AND
m.unit_concept_id <> 0
GROUP BY 1, 2
ORDER BY count DESC) a
""".format(DATASET, DATASET, string_desc_concepts)
tot_units_df = pd.io.gbq.read_gbq(total_unit_concept_names,
dialect='standard')
tot_units = tot_units_df['tot_concepts'].iloc[0]
return tot_units
def find_most_popular_unit_type(tot_units, DATASET, string_desc_concepts):
"""
Function is used to find the most popular unit type for the 'cluster'
of measurement concept IDs that represent a particular measurement set.
Parameters
----------
tot_units (int): represents the total number of records for the particular
measurement set that have a unit_concept ID
DATASET (string): string representing the dataset to be queried. Taken from the
parameters file
string_desc_concepts(string): string of all the descendant concept IDs that
represent the concept_ids for the particular measurement set.
Returns
-------
most_pop_unit (string): string that represents the most popular unit concept
name for the particular measurement set.
"""
units_for_lab = """
SELECT
DISTINCT
c.concept_name as unit_name, c.standard_concept, COUNT(*) as count, ROUND(COUNT(*) / {} * 100, 2) as percentage_units
FROM
`{}.unioned_ehr_measurement` m
LEFT JOIN
`{}.concept` c
ON
m.unit_concept_id = c.concept_id
WHERE
m.measurement_concept_id IN {}
AND
m.unit_concept_id IS NOT NULL
AND
m.unit_concept_id <> 0
GROUP BY 1, 2
ORDER BY count DESC
""".format(tot_units, DATASET, DATASET, string_desc_concepts)
units_for_lab_df = pd.io.gbq.read_gbq(units_for_lab, dialect='standard')
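# Assumed continuation: the docstring above states that this function returns the most
# popular unit concept name; the query orders by count descending, so it is the first row.
most_pop_unit = units_for_lab_df['unit_name'].iloc[0]
return most_pop_unit
# Illustrative driver cell (an assumption about how the notebook proceeds):
# for ancestor in measurement_ancestors:
#     descendants = find_descendants(DATASET, ancestor)
#     total_units = find_total_number_of_units_for_lab_type(DATASET, descendants)
#     print(ancestor, find_most_popular_unit_type(total_units, DATASET, descendants))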
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from preprocessor import Preprocessor #importing task1 file
from character import CharacterAnalyser #importing task2 file
from word import WordAnalyser #importing task3 file
from visualiser import AnalysisVisualiser # importing task4 file
import sys
#Making the objects of the respective classes
p=Preprocessor()
c=CharacterAnalyser()
w=WordAnalyser()
#function to convert normal df into relative df
def rel_func(accept_df):
accept_df=accept_df
total=sum(accept_df['count']) #counts the total of the column with counts
accept_df['rel_freq']= (accept_df['count']/total) #calculate the relative frequency
return accept_df
#Below method performs the required processing of the data and returns the relative frequency dfs
def perform_processing(input_file):
input_file=input_file
p.tokenise_word(input_file) #send the words to get tokenised
tokenised_punc_char_list=p.get_tokenised_punc_char_list() # retrieves the tokenised list which contains punctuation and characters
c.analyse_characters(tokenised_punc_char_list) #retrieves the punctuation frequency
#Required for task 4
pun_freq=c.get_punctuation_frequency() #store the punctuation frequency
letter_freq=c.get_letter_frequency() #store the letter frequency
analyse_words=p.get_tokenised_word_list() #get the tokenised word list
w.analyse_words(analyse_words) #send it for processing
#Required for task 4
stopword_freq=w.get_stopword_frequency() #store the stopword frequency
word_length_freq=w.get_word_length_frequency() #store the word length frequency
#relative freq of pun
pun_rel_freq=rel_func(pun_freq) #convert normal df into relative frequency df
pun_rel_freq.set_index('char', inplace=True)
pun_rel_freq=pun_freq[['rel_freq']]
#relative freq of letter
letter_rel_freq=rel_func(letter_freq) #convert normal df into relative frequency df
letter_rel_freq.set_index('char', inplace=True)
letter_rel_freq=letter_rel_freq[['rel_freq']]
#relative freq of stop word
stopword_rel_freq=rel_func(stopword_freq) #convert normal df into relative frequency df
stopword_rel_freq.set_index('stop_word', inplace=True)
stopword_rel_freq=stopword_rel_freq[['rel_freq']]
#relative freq of word length
wordlen_rel_freq=rel_func(word_length_freq) #convert normal df into relative frequency df
wordlen_rel_freq.set_index('wordlen', inplace=True)
wordlen_rel_freq=wordlen_rel_freq[['rel_freq']]
return pun_rel_freq,letter_rel_freq,stopword_rel_freq,wordlen_rel_freq
#Below method is used to implement the visualisation
def visualise(selection,accept_stats_df):
if selection == 'pun': # if the visualisation is punctuation then proceed
a=AnalysisVisualiser(accept_stats_df)
a.visualise_punctuation_frequency()
elif selection == 'letter': # if the visualisation is letter then proceed
a=AnalysisVisualiser(accept_stats_df)
a.visualise_character_frequency()
elif selection == 'stopword': # if the visualisation is stop word then proceed
a=AnalysisVisualiser(accept_stats_df)
a.visualise_stopword_frequency()
else: # else the visualisation is word length and proceed
a=AnalysisVisualiser(accept_stats_df)
a.visualise_word_length_frequency()
def main():
try:
#Read the 6 files and store them
with open('Edward_II_Marlowe.tok', 'r') as input_file:
edward_inputfile = input_file.read()
with open('Hamlet_Shakespeare.tok', 'r') as input_file:
hamlet_inputfile = input_file.read()
with open('Henry_VI_Part1_Shakespeare.tok', 'r') as input_file:
henry_part1_inputfile = input_file.read()
with open('Henry_VI_Part2_Shakespeare.tok', 'r') as input_file:
henry_part2_inputfile = input_file.read()
with open('Jew_of_Malta_Marlowe.tok', 'r') as input_file:
jew_inputfile = input_file.read()
with open('Richard_II_Shakespeare.tok', 'r') as input_file:
richard_inputfile = input_file.read()
#in below step send the individual input file to processing and the return has respective frequency of statistics
edward_pun,edward_letter,edward_stopword,edward_wordlen=perform_processing(edward_inputfile)
hamlet_pun,hamlet_letter,hamlet_stopword,hamlet_wordlen=perform_processing(hamlet_inputfile)
henry_part1_pun,henry_part1_letter,henry_part1_stopword,henry_part1_wordlen=perform_processing(henry_part1_inputfile)
henry_part2_pun,henry_part2_letter,henry_part2_stopword,henry_part2_wordlen=perform_processing(henry_part2_inputfile)
jew_pun,jew_letter,jew_stopword,jew_wordlen=perform_processing(jew_inputfile)
richard_pun,richard_letter,richard_stopword,richard_wordlen=perform_processing(richard_inputfile)
# Merge total Letter from 6 files into single df and print
total_letter_df= | pd.DataFrame() | pandas.DataFrame |
# -*- coding:utf-8 -*-
# =========================================================================== #
# Project : Ames House Prediction Model #
# File : \eda.py #
# Python : 3.9.1 #
# --------------------------------------------------------------------------- #
# Author : <NAME> #
# Company : nov8.ai #
# Email : <EMAIL> #
# URL : https://github.com/john-james-sf/Ames/ #
# --------------------------------------------------------------------------- #
# Created : Tuesday, March 9th 2021, 11:06:05 pm #
# Last Modified : Tuesday, March 9th 2021, 11:06:05 pm #
# Modified By : <NAME> (<EMAIL>) #
# --------------------------------------------------------------------------- #
# License : BSD #
# Copyright (c) 2021 nov8.ai #
# =========================================================================== #
#%%
import pandas as pd
import numpy as np
import fnmatch
import os
from tabulate import tabulate
# --------------------------------------------------------------------------- #
class DataBuilder:
"""Combines all training set splits into a single file."""
def __init__(self, inpath="../data/raw/", outpath="../data/interim/"):
self._inpath = inpath
self._outpath = outpath
self.X_train = None
self.y_train = None
def build_data(self):
train = pd.DataFrame()
for filename in os.listdir(self._inpath):
if fnmatch.fnmatch(filename, "*train.csv"):
df = pd.read_csv(os.path.join(self._inpath, filename))
train = | pd.concat((train, df), axis=0) | pandas.concat |
import pandas
import os
import ast
def create_CSV_pipeline1(
platename, seriesperwell, path, illum_path, platedict, one_or_many, Channeldict
):
if one_or_many == "one":
print("CSV creation not enabled for Channeldict for one file/well")
return
else:
columns_per_channel = ["PathName_", "FileName_", "Frame_"]
columns = ["Metadata_Plate", "Metadata_Series", "Metadata_Site"]
channels = []
Channeldict = ast.literal_eval(Channeldict)
rounddict = {}
Channelrounds = list(Channeldict.keys())
for eachround in Channelrounds:
templist = []
templist += Channeldict[eachround].values()
channels += list(i[0] for i in templist)
rounddict[eachround] = list(i[0] for i in templist)
df = pandas.DataFrame(columns=columns)
for chan in channels:
listoffiles = []
for round in rounddict.keys():
if chan in rounddict[round]:
for well in platedict.keys():
listoffiles.append(platedict[well][round])
listoffiles = [x for l in listoffiles for x in l]
df["FileName_Orig" + chan] = listoffiles
df["Metadata_Plate"] = [platename] * len(listoffiles)
df["Metadata_Series"] = list(range(seriesperwell)) * len(platedict.keys())
for eachround in Channelrounds:
pathperround = path + eachround + "/"
for chan in channels:
for i in list(Channeldict[eachround].values()):
if chan == i[0]:
df["PathName_Orig" + chan] = pathperround
df["Frame_Orig" + chan] = i[1]
file_out_name = "/tmp/" + str(platename) + ".csv"
df.to_csv(file_out_name, index=False)
# Make .csv for 2_CP_ApplyIllum
df["Metadata_Site"] = df["Metadata_Series"]
well_df_list = []
well_val_df_list = []
for eachwell in platedict.keys():
well_df_list += [eachwell] * seriesperwell
wellval = eachwell.split("Well")[1]
if wellval[0] == "_":
wellval = wellval[1:]
well_val_df_list += [wellval] * seriesperwell
df["Metadata_Well"] = well_df_list
df["Metadata_Well_Value"] = well_val_df_list
for chan in channels:
listoffiles = []
for round in rounddict.keys():
if chan in rounddict[round]:
for well in platedict.keys():
listoffiles.append(platedict[well][round])
listoffiles = [x for l in listoffiles for x in l]
df["PathName_Illum" + chan] = [illum_path] * len(listoffiles)
df["FileName_Illum" + chan] = [platename + "_Illum" + chan + ".npy"] * len(
listoffiles
)
file_out_name_2 = "/tmp/" + str(platename) + ".csv"
df.to_csv(file_out_name_2, index=False)
return file_out_name, file_out_name_2
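# Illustrative sketch (added for clarity, not part of the original module): the exact
# Channeldict format is not documented here, but create_CSV_pipeline1 above indexes it as
# {round: {microscope channel: (output channel name, frame index)}} after ast.literal_eval,
# so a hypothetical input string could look like the one below (round and channel names
# are made up for illustration only).
import ast
example_Channeldict = "{'20X_round1': {'DAPI': ('DNA', 0), 'GFP': ('Phalloidin', 1)}}"
parsed = ast.literal_eval(example_Channeldict)
for eachround, channel_map in parsed.items():
    for channel_name, frame_index in channel_map.values():
        print(eachround, channel_name, frame_index)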
def create_CSV_pipeline3(platename, seriesperwell, path, well_list, range_skip):
columns = [
"Metadata_Plate",
"Metadata_Site",
"Metadata_Well",
"Metadata_Well_Value",
]
columns_per_channel = ["PathName_", "FileName_"]
channels = ["DNA", "Phalloidin"]
columns += [col + chan for col in columns_per_channel for chan in channels]
df = pandas.DataFrame(columns=columns)
sitelist = list(range(0, seriesperwell, range_skip))
sites_per_well = len(sitelist)
total_file_count = sites_per_well * len(well_list)
df["Metadata_Plate"] = [platename] * total_file_count
df["Metadata_Site"] = sitelist * len(well_list)
well_df_list = []
well_val_df_list = []
parsed_well_list = []
for eachwell in well_list:
well_df_list += [eachwell] * sites_per_well
wellval = eachwell.split("Well")[1]
if wellval[0] == "_":
wellval = wellval[1:]
well_val_df_list += [wellval] * sites_per_well
parsed_well_list.append(wellval)
df["Metadata_Well"] = well_df_list
df["Metadata_Well_Value"] = well_val_df_list
path_list = [
os.path.join(path, platename + "-" + well)
for well in well_list
for site in sitelist
]
for chan in channels:
df["PathName_" + chan] = path_list
df["FileName_" + chan] = [
"Plate_"
+ platename
+ "_Well_"
+ well
+ "_Site_"
+ str(site)
+ "_Corr"
+ chan
+ ".tiff"
for well in parsed_well_list
for site in sitelist
]
file_out_name = "/tmp/" + str(platename) + ".csv"
df.to_csv(file_out_name, index=False)
return file_out_name
def create_CSV_pipeline5(
platename,
seriesperwell,
expected_cycles,
path,
platedict,
one_or_many,
fast_or_slow,
):
expected_cycles = int(expected_cycles)
columns = ["Metadata_Plate", "Metadata_Site", "Metadata_SBSCycle"]
channels = ["OrigT", "OrigG", "OrigA", "OrigC", "OrigDNA"]
if one_or_many == "one" and fast_or_slow == "fast":
columns_per_channel = ["PathName_", "FileName_", "Series_", "Frame_"]
columns += [col + chan for col in columns_per_channel for chan in channels]
df = pandas.DataFrame(columns=columns)
well_list = platedict[1]
total_file_count = seriesperwell * len(well_list) * expected_cycles
df["Metadata_Plate"] = [platename] * total_file_count
df["Metadata_Site"] = (
list(range(seriesperwell)) * len(well_list) * expected_cycles
)
cycle_list = []
path_list = []
A_list = []
C_list = []
G_list = []
T_list = []
DNA_list = []
for cycle in range(1, (expected_cycles + 1)):
for eachwell in platedict[cycle]:
cycle_list += [int(cycle)] * seriesperwell
path_list += [
os.path.join(path, platedict[cycle][eachwell][0])
] * seriesperwell
T_list += [platedict[cycle][eachwell][1][0]] * seriesperwell
G_list += [platedict[cycle][eachwell][1][1]] * seriesperwell
A_list += [platedict[cycle][eachwell][1][2]] * seriesperwell
C_list += [platedict[cycle][eachwell][1][3]] * seriesperwell
DNA_list += [platedict[cycle][eachwell][1][4]] * seriesperwell
df["Metadata_SBSCycle"] = cycle_list
for chan in channels:
df["Series_" + chan] = (
list(range(seriesperwell)) * len(well_list) * expected_cycles
)
df["PathName_" + chan] = path_list
df["FileName_OrigT"] = T_list
df["FileName_OrigG"] = G_list
df["FileName_OrigA"] = A_list
df["FileName_OrigC"] = C_list
df["FileName_OrigDNA"] = DNA_list
df["Frame_OrigDNA"] = [0] * total_file_count
df["Frame_OrigG"] = ([1] * seriesperwell * len(well_list)) + (
[0] * seriesperwell * len(well_list) * (expected_cycles - 1)
)
df["Frame_OrigT"] = ([2] * seriesperwell * len(well_list)) + (
[0] * seriesperwell * len(well_list) * (expected_cycles - 1)
)
df["Frame_OrigA"] = ([3] * seriesperwell * len(well_list)) + (
[0] * seriesperwell * len(well_list) * (expected_cycles - 1)
)
df["Frame_OrigC"] = ([4] * seriesperwell * len(well_list)) + (
[0] * seriesperwell * len(well_list) * (expected_cycles - 1)
)
elif one_or_many == "many" and fast_or_slow == "slow":
columns_per_channel = ["PathName_", "FileName_", "Frame_"]
columns += [col + chan for col in columns_per_channel for chan in channels]
df = pandas.DataFrame(columns=columns)
well_list = platedict[1]
total_file_count = seriesperwell * len(well_list) * expected_cycles
df["Metadata_Plate"] = [platename] * total_file_count
df["Metadata_Site"] = (
list(range(seriesperwell)) * len(well_list) * expected_cycles
)
cycle_list = []
path_list = []
file_list = []
for cycle in range(1, (expected_cycles + 1)):
for eachwell in platedict[cycle]:
cycle_list += [int(cycle)] * seriesperwell
path_list += [
os.path.join(path, platedict[cycle][eachwell][0])
] * seriesperwell
file_list += platedict[cycle][eachwell][1]
df["Metadata_SBSCycle"] = cycle_list
for chan in channels:
df["PathName_" + chan] = path_list
df["FileName_" + chan] = file_list
df["Frame_OrigDNA"] = [0] * total_file_count
df["Frame_OrigG"] = [1] * total_file_count
df["Frame_OrigT"] = [2] * total_file_count
df["Frame_OrigA"] = [3] * total_file_count
df["Frame_OrigC"] = [4] * total_file_count
file_out_name = "/tmp/" + str(platename) + ".csv"
df.to_csv(file_out_name, index=False)
return file_out_name
def create_CSV_pipeline6(
platename,
seriesperwell,
expected_cycles,
path,
illum_path,
platedict,
one_or_many,
fast_or_slow,
):
expected_cycles = int(expected_cycles)
if one_or_many == "one" and fast_or_slow == "fast":
columns = [
"Metadata_Plate",
"Metadata_Series",
"Metadata_Well",
"Metadata_Well_Value",
"Metadata_ArbitraryGroup",
]
columns_per_channel = ["PathName_", "FileName_", "Series_", "Frame_"]
cycles = ["Cycle%02d_" % x for x in range(1, expected_cycles + 1)]
or_il = ["Orig", "Illum"]
channels = ["A", "C", "G", "T", "DNA"]
columns += [
col + cycle + oi + channel
for col in columns_per_channel
for cycle in cycles
for oi in or_il
for channel in channels
]
df = pandas.DataFrame(columns=columns)
well_list = list(platedict["1"].keys())
total_file_count = seriesperwell * len(well_list)
df["Metadata_Plate"] = [platename] * total_file_count
df["Metadata_Series"] = list(range(seriesperwell)) * len(well_list)
df["Metadata_ArbitraryGroup"] = list(range(19)) * 19 * len(well_list)
well_df_list = []
well_val_df_list = []
for eachwell in well_list:
well_df_list += [eachwell] * seriesperwell
wellval = eachwell.split("Well")[1]
if wellval[0] == "_":
wellval = wellval[1:]
well_val_df_list += [wellval] * seriesperwell
df["Metadata_Well"] = well_df_list
df["Metadata_Well_Value"] = well_val_df_list
for cycle in range(1, (expected_cycles + 1)):
this_cycle = "Cycle%02d_" % cycle
path_list = []
A_list = []
C_list = []
G_list = []
T_list = []
DNA_list = []
for eachwell in platedict[str(cycle)]:
path_list += [
os.path.join(path, platedict[str(cycle)][eachwell][0])
] * seriesperwell
T_list += [platedict[str(cycle)][eachwell][1][0]] * seriesperwell
G_list += [platedict[str(cycle)][eachwell][1][1]] * seriesperwell
A_list += [platedict[str(cycle)][eachwell][1][2]] * seriesperwell
C_list += [platedict[str(cycle)][eachwell][1][3]] * seriesperwell
DNA_list += [platedict[str(cycle)][eachwell][1][4]] * seriesperwell
for chan in channels:
df["Series_" + this_cycle + "Orig" + chan] = list(
range(seriesperwell)
) * len(well_list)
df["PathName_" + this_cycle + "Orig" + chan] = path_list
df["Series_" + this_cycle + "Illum" + chan] = df[
"Frame_" + this_cycle + "Illum" + chan
] = [0] * total_file_count
df["PathName_" + this_cycle + "Illum" + chan] = [
illum_path
] * total_file_count
df["FileName_" + this_cycle + "Illum" + chan] = [
platename + "_Cycle" + str(cycle) + "_Illum" + chan + ".npy"
] * total_file_count # this name doesn't have digit padding
df["FileName_" + this_cycle + "OrigT"] = T_list
df["FileName_" + this_cycle + "OrigG"] = G_list
df["FileName_" + this_cycle + "OrigA"] = A_list
df["FileName_" + this_cycle + "OrigC"] = C_list
df["FileName_" + this_cycle + "OrigDNA"] = DNA_list
df["Frame_" + this_cycle + "OrigDNA"] = [0] * total_file_count
if cycle == 1:
df["Frame_" + this_cycle + "OrigG"] = [1] * total_file_count
df["Frame_" + this_cycle + "OrigT"] = [2] * total_file_count
df["Frame_" + this_cycle + "OrigA"] = [3] * total_file_count
df["Frame_" + this_cycle + "OrigC"] = [4] * total_file_count
else:
df["Frame_" + this_cycle + "OrigG"] = df[
"Frame_" + this_cycle + "OrigT"
] = df["Frame_" + this_cycle + "OrigA"] = df[
"Frame_" + this_cycle + "OrigC"
] = (
[0] * total_file_count
)
elif one_or_many == "many" and fast_or_slow == "slow":
columns = [
"Metadata_Plate",
"Metadata_Site",
"Metadata_Well",
"Metadata_Well_Value",
]
columns_per_channel = ["PathName_", "FileName_", "Frame_"]
cycles = ["Cycle%02d_" % x for x in range(1, expected_cycles + 1)]
or_il = ["Orig", "Illum"]
channels = ["A", "C", "G", "T", "DNA"]
columns += [
col + cycle + oi + channel
for col in columns_per_channel
for cycle in cycles
for oi in or_il
for channel in channels
]
df = pandas.DataFrame(columns=columns)
well_list = list(platedict["1"].keys())
total_file_count = seriesperwell * len(well_list)
df["Metadata_Plate"] = [platename] * total_file_count
df["Metadata_Site"] = list(range(seriesperwell)) * len(well_list)
well_df_list = []
well_val_df_list = []
for eachwell in well_list:
well_df_list += [eachwell] * seriesperwell
wellval = eachwell.split("Well")[1]
if wellval[0] == "_":
wellval = wellval[1:]
well_val_df_list += [wellval] * seriesperwell
df["Metadata_Well"] = well_df_list
df["Metadata_Well_Value"] = well_val_df_list
for cycle in range(1, (expected_cycles + 1)):
this_cycle = "Cycle%02d_" % cycle
path_list = []
file_list = []
for eachwell in platedict[str(cycle)]:
path_list += [
os.path.join(path, platedict[str(cycle)][eachwell][0])
] * seriesperwell
file_list += platedict[str(cycle)][eachwell][1]
for chan in channels:
df["PathName_" + this_cycle + "Orig" + chan] = path_list
df["Frame_" + this_cycle + "Illum" + chan] = [0] * total_file_count
df["PathName_" + this_cycle + "Illum" + chan] = [
illum_path
] * total_file_count
df["FileName_" + this_cycle + "Illum" + chan] = [
platename + "_Cycle" + str(cycle) + "_Illum" + chan + ".npy"
] * total_file_count # this name doesn't have digit padding
df["FileName_" + this_cycle + "Orig" + chan] = file_list
df["Frame_" + this_cycle + "OrigDNA"] = [0] * total_file_count
df["Frame_" + this_cycle + "OrigG"] = [1] * total_file_count
df["Frame_" + this_cycle + "OrigT"] = [2] * total_file_count
df["Frame_" + this_cycle + "OrigA"] = [3] * total_file_count
df["Frame_" + this_cycle + "OrigC"] = [4] * total_file_count
file_out_name = "/tmp/" + str(platename) + ".csv"
df.to_csv(file_out_name, index=False)
return file_out_name
def create_CSV_pipeline7(platename, seriesperwell, expected_cycles, path, well_list):
expected_cycles = int(expected_cycles)
columns = [
"Metadata_Plate",
"Metadata_Site",
"Metadata_Well",
"Metadata_Well_Value",
]
columns_per_channel = ["PathName_", "FileName_"]
cycles = ["Cycle%02d_" % x for x in range(1, expected_cycles + 1)]
channels = ["A", "C", "G", "T"]
columns += [
col + cycle + channel
for col in columns_per_channel
for cycle in cycles
for channel in channels
]
columns += ["PathName_Cycle01_DAPI", "FileName_Cycle01_DAPI"]
df = | pandas.DataFrame(columns=columns) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
from __future__ import print_function
"""A script to calculate TPM values for contigs or genes based on count files
TPM values are defined as in Wagner et al (Theory in Biosciences) 2012.
TPM = (rg x rl x 10^6) / (flg x T)
rg: reads mapped to gene g
rl: read length
flg: feature length
T: sum of rgxrl/flg for all genes
"""
import sys, pandas as pd, argparse, logging
import re
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)
def gene_lengths_from_gff(gff_file):
gene_id_regex = re.compile('ID=([a-zA-Z_\-0-9]*);')
gene_lengths = {}
with open(gff_file) as fh:
for line in fh:
gene_id = gene_id_regex.findall(line)[0]
gene_lengths[gene_id] = abs(int(line.split(' ')[4]) - int(line.split(' ')[3])) + 1
return | pd.Series(gene_lengths) | pandas.Series |
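# Illustrative sketch (added for clarity, not part of the original script): how the ID
# regex above extracts a gene id from a single hypothetical attribute line, and how the
# feature length falls out of the start/end columns. Note the code above splits on single
# spaces, so the example line is space-separated to match that assumption.
import re
example_line = "contig_1 Prodigal CDS 100 400 . + 0 ID=gene_0001;partial=00"
example_gene_id = re.compile(r'ID=([a-zA-Z_\-0-9]*);').findall(example_line)[0]  # 'gene_0001'
example_length = abs(int(example_line.split(' ')[4]) - int(example_line.split(' ')[3])) + 1  # 301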
from datetime import datetime
from typing import Dict, Optional, List
import numpy as np
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
from src.constants import remove_from_plots
from src.data.prices import get_prices, round_price
from src.plot.asset_history import plot_asset_history
import logging
logger = logging.getLogger(__name__)
QUOTE_COINS = ['USDT', 'BUSD', 'RUB']
class OrdersAnalyser:
def __init__(self, client_helper, orders):
self._orders = self.prepare_dataframe(orders)
self.client_helper = client_helper
self.width = 1200
self.height = 400
@property
def orders(self):
return self._orders
@staticmethod
def prepare_dataframe(orders: pd.DataFrame):
orders = orders.copy()
numerical_columns = ['price', 'origQty', 'executedQty', 'cummulativeQuoteQty']
for col in numerical_columns:
orders[col] = orders[col].astype(float)
# Use only filled orders
orders = orders[orders['status'] == 'FILLED']
# Replace payments with BUSD to USDT to simplify
orders.loc[orders['quote_coin'] == 'BUSD', 'quote_coin'] = 'USDT'
# Calculate executedCorrectedQty, needed for calculating the mean buying price
updated_orders = []
for _, pair_orders in orders.groupby(['base_coin']):
updated_orders.append(calculate_corrected_balance_for_pair(pair_orders))
orders = pd.concat(updated_orders)
assert np.all(np.isin(orders['quote_coin'].unique(), QUOTE_COINS)), f'Only {QUOTE_COINS} quote coins allowed'
return orders
def calculate_mean_price(self):
orders = self._orders
average_prices = []
for base_coin, pair_orders in orders.groupby(['base_coin']):
quote_coin = pair_orders['quote_coin'].unique()
if len(quote_coin) > 1:
msg = f'can calculate average purchase price only with single quote_coin, ' \
f'but for {base_coin} there are several: {quote_coin}'
raise ValueError(msg)
quote_coin = quote_coin[0]
mask_buy = pair_orders['side'] == 'BUY'
average_price = (pair_orders.loc[mask_buy, 'price'] * pair_orders.loc[
mask_buy, 'executedCorrectedQty']).sum() / pair_orders.loc[mask_buy, 'executedCorrectedQty'].sum()
average_prices.append(
{'base_coin': base_coin, 'quote_coin': quote_coin, 'average_price': average_price,
'n_purchases': mask_buy.sum(), 'n_sales': (~mask_buy).sum()})
average_prices = pd.DataFrame(average_prices)
return average_prices
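# Worked example (added for clarity, not part of the original module): with two BUY orders
# of 1.0 coin at 100 USDT and 3.0 coin at 200 USDT, the volume-weighted average purchase
# price computed above is (1.0*100 + 3.0*200) / (1.0 + 3.0) = 700 / 4 = 175 USDT per coin.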
def plot_transactions(self, base_coin: str = 'BTC', price_history: pd.DataFrame = None,
add_mean_price: bool = True, add_last_price: bool = True):
plot_df = self.orders[self.orders['base_coin'] == base_coin]
assert np.all(
np.isin(plot_df['quote_coin'].unique(), QUOTE_COINS)), f'Only {QUOTE_COINS} quote coins are acceptable'
fig = px.scatter(plot_df, x='date', y="price", size='executedQty', color='side',
title=f'{base_coin} transactions', size_max=10, hover_data=['cummulativeQuoteQty'])
if price_history is not None:
fig.add_trace(
go.Scatter(x=price_history['date'], y=price_history['Close'], mode='lines', name='history',
marker_color='grey'))
if add_mean_price:
mean_price = self.calculate_mean_price()
mean_price = mean_price.loc[mean_price['base_coin'] == base_coin, 'average_price'].item()
fig.add_hline(y=mean_price, line_dash="dot",
annotation_text=f'average purchase price = {round_price(mean_price)} usdt',
annotation_position="bottom right")
if add_last_price:
last_price = price_history.iloc[-1]
fig.add_annotation(
x=last_price['date'],
y=last_price['Close'],
text=f"Last price = {round_price(last_price['Close'])} usdt",
arrowhead=2,
)
fig.update_xaxes(
rangeslider_visible=True,
rangeselector=dict(
buttons=list([
dict(count=1, label="1m", step="month", stepmode="backward"),
dict(count=6, label="6m", step="month", stepmode="backward"),
dict(count=1, label="1y", step="year", stepmode="backward"),
dict(step="all"),
])
),
type='date'
)
fig.update_layout(yaxis_title='USDT', width=self.width, height=self.height, xaxis_fixedrange=False,
yaxis_fixedrange=False)
return fig
def plot_transactions_many(self, coins):
fig_dict = {}
for base_coin in coins: # ['LTC', 'ETH']:
price_history = self.client_helper.get_historical_prices(base_coin + 'USDT', start_date='1 Jan, 2021')
fig = self.plot_transactions(base_coin, price_history)
fig_dict[base_coin] = fig
return fig_dict
def prepare_coins_asset_history(self) -> Dict[str, pd.DataFrame]:
coins_asset_history = {}
prices = get_prices(self.client_helper)
for base_coin, pair_orders in self.orders.groupby('base_coin'):
if base_coin in remove_from_plots:
continue
price_history = prices[['date', base_coin]]
price_history.columns = ['date', 'price']
asset_history = calculate_asset_worth_history(pair_orders, price_history)
coins_asset_history[base_coin] = asset_history
return coins_asset_history
def plot_coins_asset_history(self, coins_asset_history: Dict[str, pd.DataFrame], items: Optional[List] = None):
fig_dict = {}
if items is None:
items = coins_asset_history.keys()
for item in items:
plot_df = coins_asset_history[item]
fig = plot_asset_history(plot_df, title=f'{item} asset value history', width=self.width, height=self.height)
fig_dict[item] = fig
return fig_dict
def plot_full_asset_history(self, coins_asset_history: Dict[str, pd.DataFrame], items: Optional[List] = None):
cash_df = []
coin_df = []
if items is None:
items = coins_asset_history.keys()
for item in items:
plot_df = coins_asset_history[item]
cash_df.append(plot_df[['date', 'usdt_cash_in_cum']].set_index('date'))
coin_df.append(plot_df[['date', 'coin_cum_usdt_value']].set_index('date'))
cash_df = pd.concat(cash_df, axis=1).ffill().sum(axis=1)
cash_df.name = 'usdt_cash_in_cum'
coin_df = pd.concat(coin_df, axis=1).ffill().sum(axis=1)
coin_df.name = 'coin_cum_usdt_value'
full_asset_history = | pd.concat([cash_df, coin_df], axis=1) | pandas.concat |
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 6 22:23:07 2020
@author: atidem
"""
import pandas as pd
import numpy as np
from statsmodels.tsa.holtwinters import ExponentialSmoothing
from statsmodels.tsa.ar_model import AR,ARResults
from statsmodels.tsa.arima_model import ARIMA,ARMA,ARIMAResults,ARMAResults
from pmdarima import auto_arima
from sklearn.metrics import mean_absolute_error,mean_squared_error
import matplotlib.pyplot as plt
from matplotlib.pyplot import show
import warnings
warnings.filterwarnings("ignore")
from math import sqrt
import matplotlib as mt
import statsmodels as st
import sklearn as sk
import worldometerDataHandler as handle
from statsmodels.tsa.statespace.sarimax import SARIMAX
#%% Notes
## default test rate is 0.2, replaceable (AR, ARMA, ARIMA)
## worldometer link is replaceable
## parameters have to be searched for each country
## Russian data raise a "did not converge" exception
## edited the library file "lib\site-packages\numpy\linalg\linalg.py" so the run keeps going when that exception occurs
#%%
resultsData = []
dataPosLenDeath = 0
dataPosLenCases = 0
#%% temp method
def runAllMethods():
#%% get data from worldometer's link
global dataPosLenCases,dataPosLenDeath
getData = handle.GetDataFromWorldometer(url)
df = getData.handleData()
#%%
dataLen = len(df)
#positivity
dataPosDeath = df[df.Deaths>0]
dataPosLenDeath = len(dataPosDeath)
dataPosCases = df[df.Cases>0]
dataPosLenCases = len(dataPosCases)
# size of predict(daily)
predDayCount = 30
# total range
totalIdx = pd.date_range(df.index[0],periods=dataLen+predDayCount,freq='D')
#df["Cases"][:pd.to_datetime("19.3.2020",format="%d.%m.%Y")]
#%% measure metrics
def mape(a,b):
mask = a != 0
return (np.fabs(a - b)/a)[mask].mean()
def mae(a,b):
return mean_absolute_error(a,b)
def rmse(a,b):
return sqrt(mean_squared_error(a,b))
#%% Holt Winters
"""
---Holt Winters---
alpha = smoothing_level
beta = smoothing_slope
gamma = smoothing_seasonal
phi = damping_slope
tren = mul, add
seasonal = mul, add
period = seasonal_periods (int)
damp = True, False (damped trend)
(a user interface will be added later)
"""
def holtWinters(data,alpha=None,beta=None,gamma=None,phi=None,tren=None,seasonal='add',period=None,damp=False):
dataPos = data[data>0]
dataPosLen = len(dataPos)
dataPos = pd.to_numeric(dataPos,downcast='float')
#print(dataPos)
pred = pd.DataFrame(index=totalIdx)
model = ExponentialSmoothing(dataPos[:dataPosLen],trend=tren,seasonal=seasonal,seasonal_periods=period,damped=damp)
pred["Fitted_Values"] = model.fit(smoothing_level=alpha,smoothing_slope=beta,smoothing_seasonal=gamma,damping_slope=phi).fittedvalues
pred["Predicted_Values"] = pd.Series(model.predict(model.params,start=df.index[-1],end=totalIdx[-1]),index=totalIdx[dataLen-1:])
return pred
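# Illustrative usage sketch (added for clarity, not part of the original script): the
# wrapper above mirrors statsmodels' ExponentialSmoothing API; the docstring names map as
# tren -> trend, damp -> damped, alpha -> smoothing_level, beta -> smoothing_slope.
# The short series below is synthetic demo data.
demo_series = pd.Series([1.0, 2, 4, 7, 11, 16, 22, 29, 37, 46],
                        index=pd.date_range("2020-03-01", periods=10, freq="D"))
demo_fit = ExponentialSmoothing(demo_series, trend="add", seasonal=None,
                                damped=False).fit(smoothing_level=0.8, smoothing_slope=0.2)
demo_forecast = demo_fit.forecast(5)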
## Holt Winters Prediction Section
## default values (alpha=None,beta=None,gamma=None,phi=None,tren=None,seasonal='add',period=None,damp=False)
Case_mul_mul = holtWinters(data=df.Cases,alpha=0.25,beta=0.25,gamma=0,tren='mul',seasonal='mul',period=dataPosLenCases-1,damp=True)
Case_mul_mul.rename(columns={"Fitted_Values":"Cases_hw_tes_mul-mul","Predicted_Values": "Cases_predict_hw_tes_mul"},inplace=True)
Case_add_add = holtWinters(data=df.Cases,alpha=0.9,beta=0.9,gamma=0,tren='add',seasonal='add',period=dataPosLenCases-1,damp=False)
Case_add_add.rename(columns={"Fitted_Values":"Cases_hw_tes_add-add","Predicted_Values": "Cases_predict_hw_tes_add"},inplace=True)
Death_mul_mul = holtWinters(data=df.Deaths,alpha=0.9,beta=0.9,gamma=0,tren='mul',seasonal='mul',period=dataPosLenDeath-1,damp=True)
Death_mul_mul.rename(columns={"Fitted_Values":"Deaths_hw_tes_mul","Predicted_Values": "Deaths_predict_hw_tes_mul"},inplace=True)
Death_add_add = holtWinters(data=df.Deaths,alpha=0.9,beta=0.9,gamma=0,tren='add',seasonal='add',period=dataPosLenDeath-1,damp=False)
Death_add_add.rename(columns={"Fitted_Values":"Deaths_hw_tes_add","Predicted_Values": "Deaths_predict_hw_tes_add"},inplace=True)
## merge prediction and main dataframe
finalDf = pd.concat([df,Case_mul_mul,Case_add_add,Death_mul_mul,Death_add_add],axis=1)
#%% AutoRegresive
"""
--- AR ---
maxlag = int
method = cmle,mle
// Conditional maximum likelihood using OLS ,Unconditional (exact) maximum likelihood.
lagOpt = aic,bic,hqic,t-stat
//Akaike Information Criterion,Bayes Information Criterion,Hannan-Quinn Information Criterion,Based on last lag
trend = c,nc
//constant, no constant
"""
def ar(data,maxlag=None,metod='cmle',lagOpt='t-stat',trend='nc',testRate=0.2):
dataPos = data[data>0]
dataPosLen = len(dataPos)
splitIndex = int(dataPosLen*(1-testRate))
train = dataPos[:splitIndex]
test = dataPos[splitIndex:]
model = AR(train)
model = model.fit(maxlag=maxlag,method=metod,trend=trend,ic=lagOpt)
pred = model.predict(start=totalIdx[dataLen-dataPosLen+splitIndex],end=totalIdx[-1])
pred = | pd.DataFrame(pred) | pandas.DataFrame |
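# Illustrative usage sketch (added for clarity, not part of the original script): a minimal
# AR fit on synthetic data, mirroring the fit options described in the docstring above
# (maxlag, method, ic, trend). AR here is the statsmodels.tsa.ar_model.AR class imported
# at the top of this script.
demo_ar_series = pd.Series(np.arange(40, dtype=float) + np.random.normal(0, 0.1, 40),
                           index=pd.date_range("2020-03-01", periods=40, freq="D"))
demo_ar_fit = AR(demo_ar_series).fit(maxlag=2, method="cmle", ic="t-stat", trend="nc")
demo_ar_pred = demo_ar_fit.predict(start=demo_ar_series.index[-1],
                                   end=demo_ar_series.index[-1] + pd.Timedelta(days=10))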
#
# A program to output (to standard output) text to place in first dataset table
#
# It reads:
# 1) DEFINITIONS.csv
# 2) UNIQUES.csv
import pandas as pd
import argparse, os
def num(n):
return( '{:,d}'.format(n) )
def extract_string_and_url(d):
s = d.split(']')
n = s[0]
if len(s) > 1:
u = s[1]
else:
u = ''
n = n.replace('[', '')
u = u.replace('(', '').replace(')', '')
return(n, u)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Program to generate the first table on nCoV web page.')
parser.add_argument('-D', '--definitions', help='definitions file', default='DEFINITIONS.csv')
parser.add_argument('-U', '--uniques', help='uniques file', default='UNIQUES.csv')
parser.add_argument('-o', '--output', help='output directory', default='outputs')
args = parser.parse_args()
definitions = pd.read_csv(args.definitions, header=None)
definitions.columns = ['Key', 'Definition']
unique_info = pd.read_csv(args.uniques, header=None)
unique_info.columns = ['Key', 'Total', 'Unique']
full = | pd.merge(definitions, unique_info, on="Key") | pandas.merge |
#!/bin/env python3
"""create_csv_of_kp_predicate_triples.py
Creates a CSV of all predicate triples of the form (node type, edge type, node type) for KG1, KG2, and BTE (ARAX's current knowledge providers).
Resulting columns are: subject_type, edge_type, object_type
Usage: python create_csv_of_kp_predicate_triples.py
"""
# adapted from <NAME> code in create_csv_of_kp_node_pairs.py
import requests
import sys
import os
import csv
import time
import pandas as pd
from neo4j import GraphDatabase
sys.path.append(os.path.dirname(os.path.abspath(__file__))+"/../../") # code directory
from RTXConfiguration import RTXConfiguration
def run_neo4j_query(cypher, kg_name, data_type):
rtx_config = RTXConfiguration()
if kg_name != "KG1":
rtx_config.live = kg_name
driver = GraphDatabase.driver(rtx_config.neo4j_bolt, auth=(rtx_config.neo4j_username, rtx_config.neo4j_password))
with driver.session() as session:
start = time.time()
print(f"Grabbing {data_type} from {kg_name} neo4j...")
results = session.run(cypher).data()
print(f"...done. Query took {round((time.time() - start) / 60, 2)} minutes.")
driver.close()
return results
def get_kg1_predicate_triples():
cypher = 'match (n)-[r]->(m) with distinct labels(n) as n1s, type(r) as rel, '+\
'labels(m) as n2s unwind n1s as n1 unwind n2s as n2 with distinct n1 as '+\
'node1, rel as relationship, n2 as node2 where node1 <> "Base" and '+\
'node2 <> "Base" return node1, relationship, node2'
# Changed this from using n.category so that it can handle node with multiple labels
# Unfortunetly that makes the cypher a little more unweildly and likely slows a query a bit.
results = run_neo4j_query(cypher, "KG1", "predicate triples")
triples_dict = {"subject":[], "predicate":[], "object":[]}
for result in results:
subject_type = result.get('node1')
object_type = result.get('node2')
predicate = result.get('relationship')
triples_dict['subject'].append(subject_type)
triples_dict['object'].append(object_type)
triples_dict['predicate'].append(predicate)
return pd.DataFrame(triples_dict)
def get_kg2_predicate_triples():
cypher = 'match (n)-[r]->(m) with distinct labels(n) as n1s, type(r) as rel, '+\
'labels(m) as n2s unwind n1s as n1 unwind n2s as n2 with distinct n1 as '+\
'node1, rel as relationship, n2 as node2 where node1 <> "Base" and '+\
'node2 <> "Base" return node1, relationship, node2'
# Changed this from using n.category so that it can handle nodes with multiple labels.
# Unfortunately this makes the cypher a little more unwieldy and likely slows the query a bit.
results = run_neo4j_query(cypher, "KG2", "predicate triples")
triples_dict = {"subject":[], "predicate":[], "object":[]}
for result in results:
subject_type = result.get('node1')
object_type = result.get('node2')
predicate = result.get('relationship')
triples_dict['subject'].append(subject_type)
triples_dict['object'].append(object_type)
triples_dict['predicate'].append(predicate)
return pd.DataFrame(triples_dict)
def get_kg2c_predicate_triples():
cypher = 'match (n)-[r]->(m) with distinct labels(n) as n1s, type(r) as rel, '+\
'labels(m) as n2s unwind n1s as n1 unwind n2s as n2 with distinct n1 as '+\
'node1, rel as relationship, n2 as node2 where node1 <> "Base" and '+\
'node2 <> "Base" return node1, relationship, node2'
# Changed this from using n.category so that it can handle nodes with multiple labels.
# Unfortunately this makes the cypher a little more unwieldy and likely slows the query a bit.
results = run_neo4j_query(cypher, "KG2c", "predicate triples")
triples_dict = {"subject":[], "predicate":[], "object":[]}
for result in results:
subject_type = result.get('node1')
object_type = result.get('node2')
predicate = result.get('relationship')
triples_dict['subject'].append(subject_type)
triples_dict['object'].append(object_type)
triples_dict['predicate'].append(predicate)
return pd.DataFrame(triples_dict)
def get_kg1_node_labels():
cypher = 'call db.labels()'
results = run_neo4j_query(cypher, "KG1", "node labels")
labels_dict = {"label":[]}
for result in results:
label = result.get('label')
labels_dict["label"].append(label)
return pd.DataFrame(labels_dict)
def get_kg2_node_labels():
cypher = 'call db.labels()'
results = run_neo4j_query(cypher, "KG2", "node labels")
labels_dict = {"label":[]}
for result in results:
label = result.get('label')
labels_dict["label"].append(label)
return pd.DataFrame(labels_dict)
def get_kg2c_node_labels():
cypher = 'call db.labels()'
results = run_neo4j_query(cypher, "KG2c", "node labels")
labels_dict = {"label":[]}
for result in results:
label = result.get('label')
labels_dict["label"].append(label)
return pd.DataFrame(labels_dict)
def get_kg1_relationship_types():
cypher = 'call db.relationshipTypes()'
results = run_neo4j_query(cypher, "KG1", "relationship types")
predicate_dict = {"predicate":[]}
for result in results:
predicate = result.get('relationshipType')
predicate_dict["predicate"].append(predicate)
return pd.DataFrame(predicate_dict)
def get_kg2_relationship_types():
cypher = 'call db.relationshipTypes()'
results = run_neo4j_query(cypher, "KG2", "relationship types")
predicate_dict = {"predicate":[]}
for result in results:
predicate = result.get('relationshipType')
predicate_dict["predicate"].append(predicate)
return | pd.DataFrame(predicate_dict) | pandas.DataFrame |
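# Illustrative sketch (added for clarity, not part of the original script): each of the
# get_*_predicate_triples helpers above returns a DataFrame with subject / predicate /
# object columns, so producing the CSV described in the module docstring is a single
# to_csv call. The row and output filename below are hypothetical.
example_triples = pd.DataFrame({"subject": ["protein"],
                                "predicate": ["physically_interacts_with"],
                                "object": ["chemical_substance"]})
example_triples.to_csv("example_predicate_triples.csv", index=False)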
# -*- coding: utf-8 -*-
# pylint: disable-msg=W0612,E1101
import itertools
import warnings
from warnings import catch_warnings
from datetime import datetime
from pandas.types.common import (is_integer_dtype,
is_float_dtype,
is_scalar)
from pandas.compat import range, lrange, lzip, StringIO, lmap
from pandas.tslib import NaT
from numpy import nan
from numpy.random import randn
import numpy as np
import pandas as pd
from pandas import option_context
from pandas.core.indexing import _non_reducing_slice, _maybe_numeric_slice
from pandas.core.api import (DataFrame, Index, Series, Panel, isnull,
MultiIndex, Timestamp, Timedelta, UInt64Index)
from pandas.formats.printing import pprint_thing
from pandas import concat
from pandas.core.common import PerformanceWarning
from pandas.tests.indexing.common import _mklbl
import pandas.util.testing as tm
from pandas import date_range
_verbose = False
# ------------------------------------------------------------------------
# Indexing test cases
def _generate_indices(f, values=False):
""" generate the indicies
if values is True , use the axis values
is False, use the range
"""
axes = f.axes
if values:
axes = [lrange(len(a)) for a in axes]
return itertools.product(*axes)
def _get_value(f, i, values=False):
""" return the value for the location i """
# check against values
if values:
return f.values[i]
# this is equiv of f[col][row].....
# v = f
# for a in reversed(i):
# v = v.__getitem__(a)
# return v
with catch_warnings(record=True):
return f.ix[i]
def _get_result(obj, method, key, axis):
""" return the result for this obj with this key and this axis """
if isinstance(key, dict):
key = key[axis]
# use an artificial conversion to map the key as integers to the labels
# so ix can work for comparisons
if method == 'indexer':
method = 'ix'
key = obj._get_axis(axis)[key]
# in case we actually want 0 index slicing
try:
xp = getattr(obj, method).__getitem__(_axify(obj, key, axis))
except:
xp = getattr(obj, method).__getitem__(key)
return xp
def _axify(obj, key, axis):
# create a tuple accessor
axes = [slice(None)] * obj.ndim
axes[axis] = key
return tuple(axes)
class TestIndexing(tm.TestCase):
_objs = set(['series', 'frame', 'panel'])
_typs = set(['ints', 'uints', 'labels', 'mixed',
'ts', 'floats', 'empty', 'ts_rev'])
def setUp(self):
self.series_ints = Series(np.random.rand(4), index=lrange(0, 8, 2))
self.frame_ints = DataFrame(np.random.randn(4, 4),
index=lrange(0, 8, 2),
columns=lrange(0, 12, 3))
self.panel_ints = Panel(np.random.rand(4, 4, 4),
items=lrange(0, 8, 2),
major_axis=lrange(0, 12, 3),
minor_axis=lrange(0, 16, 4))
self.series_uints = Series(np.random.rand(4),
index=UInt64Index(lrange(0, 8, 2)))
self.frame_uints = DataFrame(np.random.randn(4, 4),
index=UInt64Index(lrange(0, 8, 2)),
columns=UInt64Index(lrange(0, 12, 3)))
self.panel_uints = Panel(np.random.rand(4, 4, 4),
items=UInt64Index(lrange(0, 8, 2)),
major_axis=UInt64Index(lrange(0, 12, 3)),
minor_axis=UInt64Index(lrange(0, 16, 4)))
self.series_labels = Series(np.random.randn(4), index=list('abcd'))
self.frame_labels = DataFrame(np.random.randn(4, 4),
index=list('abcd'), columns=list('ABCD'))
self.panel_labels = Panel(np.random.randn(4, 4, 4),
items=list('abcd'),
major_axis=list('ABCD'),
minor_axis=list('ZYXW'))
self.series_mixed = Series(np.random.randn(4), index=[2, 4, 'null', 8])
self.frame_mixed = DataFrame(np.random.randn(4, 4),
index=[2, 4, 'null', 8])
self.panel_mixed = Panel(np.random.randn(4, 4, 4),
items=[2, 4, 'null', 8])
self.series_ts = Series(np.random.randn(4),
index=date_range('20130101', periods=4))
self.frame_ts = DataFrame(np.random.randn(4, 4),
index=date_range('20130101', periods=4))
self.panel_ts = Panel(np.random.randn(4, 4, 4),
items=date_range('20130101', periods=4))
dates_rev = (date_range('20130101', periods=4)
.sort_values(ascending=False))
self.series_ts_rev = Series(np.random.randn(4),
index=dates_rev)
self.frame_ts_rev = DataFrame(np.random.randn(4, 4),
index=dates_rev)
self.panel_ts_rev = Panel(np.random.randn(4, 4, 4),
items=dates_rev)
self.frame_empty = DataFrame({})
self.series_empty = Series({})
self.panel_empty = Panel({})
# form agglomerates
for o in self._objs:
d = dict()
for t in self._typs:
d[t] = getattr(self, '%s_%s' % (o, t), None)
setattr(self, o, d)
def check_values(self, f, func, values=False):
if f is None:
return
axes = f.axes
indicies = itertools.product(*axes)
for i in indicies:
result = getattr(f, func)[i]
# check against values
if values:
expected = f.values[i]
else:
expected = f
for a in reversed(i):
expected = expected.__getitem__(a)
tm.assert_almost_equal(result, expected)
def check_result(self, name, method1, key1, method2, key2, typs=None,
objs=None, axes=None, fails=None):
def _eq(t, o, a, obj, k1, k2):
""" compare equal for these 2 keys """
if a is not None and a > obj.ndim - 1:
return
def _print(result, error=None):
if error is not None:
error = str(error)
v = ("%-16.16s [%-16.16s]: [typ->%-8.8s,obj->%-8.8s,"
"key1->(%-4.4s),key2->(%-4.4s),axis->%s] %s" %
(name, result, t, o, method1, method2, a, error or ''))
if _verbose:
pprint_thing(v)
try:
rs = getattr(obj, method1).__getitem__(_axify(obj, k1, a))
try:
xp = _get_result(obj, method2, k2, a)
except:
result = 'no comp'
_print(result)
return
detail = None
try:
if is_scalar(rs) and is_scalar(xp):
self.assertEqual(rs, xp)
elif xp.ndim == 1:
tm.assert_series_equal(rs, xp)
elif xp.ndim == 2:
tm.assert_frame_equal(rs, xp)
elif xp.ndim == 3:
tm.assert_panel_equal(rs, xp)
result = 'ok'
except AssertionError as e:
detail = str(e)
result = 'fail'
# reverse the checks
if fails is True:
if result == 'fail':
result = 'ok (fail)'
_print(result)
if not result.startswith('ok'):
raise AssertionError(detail)
except AssertionError:
raise
except Exception as detail:
# if we are in fails, the ok, otherwise raise it
if fails is not None:
if isinstance(detail, fails):
result = 'ok (%s)' % type(detail).__name__
_print(result)
return
result = type(detail).__name__
raise AssertionError(_print(result, error=detail))
if typs is None:
typs = self._typs
if objs is None:
objs = self._objs
if axes is not None:
if not isinstance(axes, (tuple, list)):
axes = [axes]
else:
axes = list(axes)
else:
axes = [0, 1, 2]
# check
for o in objs:
if o not in self._objs:
continue
d = getattr(self, o)
for a in axes:
for t in typs:
if t not in self._typs:
continue
obj = d[t]
if obj is not None:
obj = obj.copy()
k2 = key2
_eq(t, o, a, obj, key1, k2)
def test_ix_deprecation(self):
# GH 15114
df = DataFrame({'A': [1, 2, 3]})
with tm.assert_produces_warning(DeprecationWarning,
check_stacklevel=False):
df.ix[1, 'A']
def test_indexer_caching(self):
# GH5727
# make sure that indexers are in the _internal_names_set
n = 1000001
arrays = [lrange(n), lrange(n)]
index = MultiIndex.from_tuples(lzip(*arrays))
s = Series(np.zeros(n), index=index)
str(s)
# setitem
expected = Series(np.ones(n), index=index)
s = Series(np.zeros(n), index=index)
s[s == 0] = 1
tm.assert_series_equal(s, expected)
def test_at_and_iat_get(self):
def _check(f, func, values=False):
if f is not None:
indicies = _generate_indices(f, values)
for i in indicies:
result = getattr(f, func)[i]
expected = _get_value(f, i, values)
tm.assert_almost_equal(result, expected)
for o in self._objs:
d = getattr(self, o)
# iat
for f in [d['ints'], d['uints']]:
_check(f, 'iat', values=True)
for f in [d['labels'], d['ts'], d['floats']]:
if f is not None:
self.assertRaises(ValueError, self.check_values, f, 'iat')
# at
for f in [d['ints'], d['uints'], d['labels'],
d['ts'], d['floats']]:
_check(f, 'at')
def test_at_and_iat_set(self):
def _check(f, func, values=False):
if f is not None:
indicies = _generate_indices(f, values)
for i in indicies:
getattr(f, func)[i] = 1
expected = _get_value(f, i, values)
tm.assert_almost_equal(expected, 1)
for t in self._objs:
d = getattr(self, t)
# iat
for f in [d['ints'], d['uints']]:
_check(f, 'iat', values=True)
for f in [d['labels'], d['ts'], d['floats']]:
if f is not None:
self.assertRaises(ValueError, _check, f, 'iat')
# at
for f in [d['ints'], d['uints'], d['labels'],
d['ts'], d['floats']]:
_check(f, 'at')
def test_at_iat_coercion(self):
# as timestamp is not a tuple!
dates = date_range('1/1/2000', periods=8)
df = DataFrame(randn(8, 4), index=dates, columns=['A', 'B', 'C', 'D'])
s = df['A']
result = s.at[dates[5]]
xp = s.values[5]
self.assertEqual(result, xp)
# GH 7729
# make sure we are boxing the returns
s = Series(['2014-01-01', '2014-02-02'], dtype='datetime64[ns]')
expected = Timestamp('2014-02-02')
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
self.assertEqual(result, expected)
s = Series(['1 days', '2 days'], dtype='timedelta64[ns]')
expected = Timedelta('2 days')
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
self.assertEqual(result, expected)
def test_iat_invalid_args(self):
pass
def test_imethods_with_dups(self):
# GH6493
# iat/iloc with dups
s = Series(range(5), index=[1, 1, 2, 2, 3], dtype='int64')
result = s.iloc[2]
self.assertEqual(result, 2)
result = s.iat[2]
self.assertEqual(result, 2)
self.assertRaises(IndexError, lambda: s.iat[10])
self.assertRaises(IndexError, lambda: s.iat[-10])
result = s.iloc[[2, 3]]
expected = Series([2, 3], [2, 2], dtype='int64')
tm.assert_series_equal(result, expected)
df = s.to_frame()
result = df.iloc[2]
expected = Series(2, index=[0], name=2)
tm.assert_series_equal(result, expected)
result = df.iat[2, 0]
expected = 2
self.assertEqual(result, 2)
def test_repeated_getitem_dups(self):
# GH 5678
# repeated getitems on a dup index returning an ndarray
df = DataFrame(
np.random.random_sample((20, 5)),
index=['ABCDE' [x % 5] for x in range(20)])
expected = df.loc['A', 0]
result = df.loc[:, 0].loc['A']
tm.assert_series_equal(result, expected)
def test_iloc_exceeds_bounds(self):
# GH6296
# iloc should allow indexers that exceed the bounds
df = DataFrame(np.random.random_sample((20, 5)), columns=list('ABCDE'))
expected = df
# lists of positions should raise IndexError!
with tm.assertRaisesRegexp(IndexError,
'positional indexers are out-of-bounds'):
df.iloc[:, [0, 1, 2, 3, 4, 5]]
self.assertRaises(IndexError, lambda: df.iloc[[1, 30]])
self.assertRaises(IndexError, lambda: df.iloc[[1, -30]])
self.assertRaises(IndexError, lambda: df.iloc[[100]])
s = df['A']
self.assertRaises(IndexError, lambda: s.iloc[[100]])
self.assertRaises(IndexError, lambda: s.iloc[[-100]])
# still raise on a single indexer
msg = 'single positional indexer is out-of-bounds'
with tm.assertRaisesRegexp(IndexError, msg):
df.iloc[30]
self.assertRaises(IndexError, lambda: df.iloc[-30])
# GH10779
# single positive/negative indexer exceeding Series bounds should raise
# an IndexError
with tm.assertRaisesRegexp(IndexError, msg):
s.iloc[30]
self.assertRaises(IndexError, lambda: s.iloc[-30])
# slices are ok
result = df.iloc[:, 4:10] # 0 < start < len < stop
expected = df.iloc[:, 4:]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -4:-10] # stop < 0 < start < len
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4:-1] # 0 < stop < len < start (down)
expected = df.iloc[:, :4:-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 4:-10:-1] # stop < 0 < start < len (down)
expected = df.iloc[:, 4::-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:4] # start < 0 < stop < len
expected = df.iloc[:, :4]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4] # 0 < stop < len < start
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:-11:-1] # stop < start < 0 < len (down)
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:11] # 0 < len < start < stop
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
# slice bounds exceeding is ok
result = s.iloc[18:30]
expected = s.iloc[18:]
tm.assert_series_equal(result, expected)
result = s.iloc[30:]
expected = s.iloc[:0]
tm.assert_series_equal(result, expected)
result = s.iloc[30::-1]
expected = s.iloc[::-1]
tm.assert_series_equal(result, expected)
# doc example
def check(result, expected):
str(result)
result.dtypes
tm.assert_frame_equal(result, expected)
dfl = DataFrame(np.random.randn(5, 2), columns=list('AB'))
check(dfl.iloc[:, 2:3], DataFrame(index=dfl.index))
check(dfl.iloc[:, 1:3], dfl.iloc[:, [1]])
check(dfl.iloc[4:6], dfl.iloc[[4]])
self.assertRaises(IndexError, lambda: dfl.iloc[[4, 5, 6]])
self.assertRaises(IndexError, lambda: dfl.iloc[:, 4])
def test_iloc_getitem_int(self):
# integer
self.check_result('integer', 'iloc', 2, 'ix',
{0: 4, 1: 6, 2: 8}, typs=['ints', 'uints'])
self.check_result('integer', 'iloc', 2, 'indexer', 2,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_neg_int(self):
# neg integer
self.check_result('neg int', 'iloc', -1, 'ix',
{0: 6, 1: 9, 2: 12}, typs=['ints', 'uints'])
self.check_result('neg int', 'iloc', -1, 'indexer', -1,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_list_int(self):
# list of ints
self.check_result('list int', 'iloc', [0, 1, 2], 'ix',
{0: [0, 2, 4], 1: [0, 3, 6], 2: [0, 4, 8]},
typs=['ints', 'uints'])
self.check_result('list int', 'iloc', [2], 'ix',
{0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
self.check_result('list int', 'iloc', [0, 1, 2], 'indexer', [0, 1, 2],
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
# array of ints (GH5006), make sure that a single indexer is returning
# the correct type
self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'ix',
{0: [0, 2, 4],
1: [0, 3, 6],
2: [0, 4, 8]}, typs=['ints', 'uints'])
self.check_result('array int', 'iloc', np.array([2]), 'ix',
{0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'indexer',
[0, 1, 2],
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_neg_int_can_reach_first_index(self):
# GH10547 and GH10779
# negative integers should be able to reach index 0
df = DataFrame({'A': [2, 3, 5], 'B': [7, 11, 13]})
s = df['A']
expected = df.iloc[0]
result = df.iloc[-3]
tm.assert_series_equal(result, expected)
expected = df.iloc[[0]]
result = df.iloc[[-3]]
tm.assert_frame_equal(result, expected)
expected = s.iloc[0]
result = s.iloc[-3]
self.assertEqual(result, expected)
expected = s.iloc[[0]]
result = s.iloc[[-3]]
tm.assert_series_equal(result, expected)
# check the length 1 Series case highlighted in GH10547
expected = pd.Series(['a'], index=['A'])
result = expected.iloc[[-1]]
tm.assert_series_equal(result, expected)
def test_iloc_getitem_dups(self):
# no dups in panel (bug?)
self.check_result('list int (dups)', 'iloc', [0, 1, 1, 3], 'ix',
{0: [0, 2, 2, 6], 1: [0, 3, 3, 9]},
objs=['series', 'frame'], typs=['ints', 'uints'])
# GH 6766
df1 = DataFrame([{'A': None, 'B': 1}, {'A': 2, 'B': 2}])
df2 = DataFrame([{'A': 3, 'B': 3}, {'A': 4, 'B': 4}])
df = concat([df1, df2], axis=1)
# cross-sectional indexing
result = df.iloc[0, 0]
self.assertTrue(isnull(result))
result = df.iloc[0, :]
expected = Series([np.nan, 1, 3, 3], index=['A', 'B', 'A', 'B'],
name=0)
tm.assert_series_equal(result, expected)
def test_iloc_getitem_array(self):
# array like
s = Series(index=lrange(1, 4))
self.check_result('array like', 'iloc', s.index, 'ix',
{0: [2, 4, 6], 1: [3, 6, 9], 2: [4, 8, 12]},
typs=['ints', 'uints'])
def test_iloc_getitem_bool(self):
# boolean indexers
b = [True, False, True, False, ]
self.check_result('bool', 'iloc', b, 'ix', b, typs=['ints', 'uints'])
self.check_result('bool', 'iloc', b, 'ix', b,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_slice(self):
# slices
self.check_result('slice', 'iloc', slice(1, 3), 'ix',
{0: [2, 4], 1: [3, 6], 2: [4, 8]},
typs=['ints', 'uints'])
self.check_result('slice', 'iloc', slice(1, 3), 'indexer',
slice(1, 3),
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_slice_dups(self):
df1 = DataFrame(np.random.randn(10, 4), columns=['A', 'A', 'B', 'B'])
df2 = DataFrame(np.random.randint(0, 10, size=20).reshape(10, 2),
columns=['A', 'C'])
# axis=1
df = concat([df1, df2], axis=1)
tm.assert_frame_equal(df.iloc[:, :4], df1)
tm.assert_frame_equal(df.iloc[:, 4:], df2)
df = concat([df2, df1], axis=1)
tm.assert_frame_equal(df.iloc[:, :2], df2)
tm.assert_frame_equal(df.iloc[:, 2:], df1)
exp = concat([df2, df1.iloc[:, [0]]], axis=1)
tm.assert_frame_equal(df.iloc[:, 0:3], exp)
# axis=0
df = concat([df, df], axis=0)
tm.assert_frame_equal(df.iloc[0:10, :2], df2)
tm.assert_frame_equal(df.iloc[0:10, 2:], df1)
tm.assert_frame_equal(df.iloc[10:, :2], df2)
tm.assert_frame_equal(df.iloc[10:, 2:], df1)
def test_iloc_setitem(self):
df = self.frame_ints
df.iloc[1, 1] = 1
result = df.iloc[1, 1]
self.assertEqual(result, 1)
df.iloc[:, 2:3] = 0
expected = df.iloc[:, 2:3]
result = df.iloc[:, 2:3]
tm.assert_frame_equal(result, expected)
# GH5771
s = Series(0, index=[4, 5, 6])
s.iloc[1:2] += 1
expected = Series([0, 1, 0], index=[4, 5, 6])
tm.assert_series_equal(s, expected)
def test_loc_setitem_slice(self):
# GH10503
# assigning the same type should not change the type
df1 = DataFrame({'a': [0, 1, 1],
'b': Series([100, 200, 300], dtype='uint32')})
ix = df1['a'] == 1
newb1 = df1.loc[ix, 'b'] + 1
df1.loc[ix, 'b'] = newb1
expected = DataFrame({'a': [0, 1, 1],
'b': Series([100, 201, 301], dtype='uint32')})
tm.assert_frame_equal(df1, expected)
# assigning a new type should get the inferred type
df2 = DataFrame({'a': [0, 1, 1], 'b': [100, 200, 300]},
dtype='uint64')
ix = df1['a'] == 1
newb2 = df2.loc[ix, 'b']
df1.loc[ix, 'b'] = newb2
expected = DataFrame({'a': [0, 1, 1], 'b': [100, 200, 300]},
dtype='uint64')
tm.assert_frame_equal(df2, expected)
def test_ix_loc_setitem_consistency(self):
# GH 5771
# loc with slice and series
s = Series(0, index=[4, 5, 6])
s.loc[4:5] += 1
expected = Series([1, 1, 0], index=[4, 5, 6])
tm.assert_series_equal(s, expected)
# GH 5928
# chained indexing assignment
df = DataFrame({'a': [0, 1, 2]})
expected = df.copy()
with catch_warnings(record=True):
expected.ix[[0, 1, 2], 'a'] = -expected.ix[[0, 1, 2], 'a']
with catch_warnings(record=True):
df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]]
tm.assert_frame_equal(df, expected)
df = DataFrame({'a': [0, 1, 2], 'b': [0, 1, 2]})
with catch_warnings(record=True):
df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]].astype(
'float64') + 0.5
expected = DataFrame({'a': [0.5, -0.5, -1.5], 'b': [0, 1, 2]})
tm.assert_frame_equal(df, expected)
# GH 8607
# ix setitem consistency
df = DataFrame({'timestamp': [1413840976, 1413842580, 1413760580],
'delta': [1174, 904, 161],
'elapsed': [7673, 9277, 1470]})
expected = DataFrame({'timestamp': pd.to_datetime(
[1413840976, 1413842580, 1413760580], unit='s'),
'delta': [1174, 904, 161],
'elapsed': [7673, 9277, 1470]})
df2 = df.copy()
df2['timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
df2 = df.copy()
df2.loc[:, 'timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
df2 = df.copy()
with catch_warnings(record=True):
df2.ix[:, 2] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
def test_ix_loc_consistency(self):
# GH 8613
# some edge cases where ix/loc should return the same
# this is not an exhaustive case
def compare(result, expected):
if is_scalar(expected):
self.assertEqual(result, expected)
else:
self.assertTrue(expected.equals(result))
# failure cases for .loc, but these work for .ix
df = pd.DataFrame(np.random.randn(5, 4), columns=list('ABCD'))
for key in [slice(1, 3), tuple([slice(0, 2), slice(0, 2)]),
tuple([slice(0, 2), df.columns[0:2]])]:
for index in [tm.makeStringIndex, tm.makeUnicodeIndex,
tm.makeDateIndex, tm.makePeriodIndex,
tm.makeTimedeltaIndex]:
df.index = index(len(df.index))
with catch_warnings(record=True):
df.ix[key]
self.assertRaises(TypeError, lambda: df.loc[key])
df = pd.DataFrame(np.random.randn(5, 4), columns=list('ABCD'),
index=pd.date_range('2012-01-01', periods=5))
for key in ['2012-01-03',
'2012-01-31',
slice('2012-01-03', '2012-01-03'),
slice('2012-01-03', '2012-01-04'),
slice('2012-01-03', '2012-01-06', 2),
slice('2012-01-03', '2012-01-31'),
tuple([[True, True, True, False, True]]), ]:
# getitem
# if the expected raises, then compare the exceptions
try:
with catch_warnings(record=True):
expected = df.ix[key]
except KeyError:
self.assertRaises(KeyError, lambda: df.loc[key])
continue
result = df.loc[key]
compare(result, expected)
# setitem
df1 = df.copy()
df2 = df.copy()
with catch_warnings(record=True):
df1.ix[key] = 10
df2.loc[key] = 10
compare(df2, df1)
# edge cases
s = Series([1, 2, 3, 4], index=list('abde'))
result1 = s['a':'c']
with catch_warnings(record=True):
result2 = s.ix['a':'c']
result3 = s.loc['a':'c']
tm.assert_series_equal(result1, result2)
tm.assert_series_equal(result1, result3)
# now work rather than raising KeyError
s = Series(range(5), [-2, -1, 1, 2, 3])
with catch_warnings(record=True):
result1 = s.ix[-10:3]
result2 = s.loc[-10:3]
tm.assert_series_equal(result1, result2)
with catch_warnings(record=True):
result1 = s.ix[0:3]
result2 = s.loc[0:3]
tm.assert_series_equal(result1, result2)
def test_loc_setitem_dups(self):
# GH 6541
df_orig = DataFrame(
{'me': list('rttti'),
'foo': list('aaade'),
'bar': np.arange(5, dtype='float64') * 1.34 + 2,
'bar2': np.arange(5, dtype='float64') * -.34 + 2}).set_index('me')
indexer = tuple(['r', ['bar', 'bar2']])
df = df_orig.copy()
df.loc[indexer] *= 2.0
tm.assert_series_equal(df.loc[indexer], 2.0 * df_orig.loc[indexer])
indexer = tuple(['r', 'bar'])
df = df_orig.copy()
df.loc[indexer] *= 2.0
self.assertEqual(df.loc[indexer], 2.0 * df_orig.loc[indexer])
indexer = tuple(['t', ['bar', 'bar2']])
df = df_orig.copy()
df.loc[indexer] *= 2.0
tm.assert_frame_equal(df.loc[indexer], 2.0 * df_orig.loc[indexer])
def test_iloc_setitem_dups(self):
# GH 6766
# iloc with a mask aligning from another iloc
df1 = DataFrame([{'A': None, 'B': 1}, {'A': 2, 'B': 2}])
df2 = DataFrame([{'A': 3, 'B': 3}, {'A': 4, 'B': 4}])
df = concat([df1, df2], axis=1)
expected = df.fillna(3)
expected['A'] = expected['A'].astype('float64')
inds = np.isnan(df.iloc[:, 0])
mask = inds[inds].index
df.iloc[mask, 0] = df.iloc[mask, 2]
tm.assert_frame_equal(df, expected)
# del a dup column across blocks
expected = DataFrame({0: [1, 2], 1: [3, 4]})
expected.columns = ['B', 'B']
del df['A']
tm.assert_frame_equal(df, expected)
# assign back to self
df.iloc[[0, 1], [0, 1]] = df.iloc[[0, 1], [0, 1]]
tm.assert_frame_equal(df, expected)
# reversed x 2
df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(
drop=True)
df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(
drop=True)
tm.assert_frame_equal(df, expected)
def test_chained_getitem_with_lists(self):
# GH6394
# Regression in chained getitem indexing with embedded list-like from
# 0.12
def check(result, expected):
tm.assert_numpy_array_equal(result, expected)
| tm.assertIsInstance(result, np.ndarray) | pandas.util.testing.assertIsInstance |
import importlib.resources
from typing import Any, Optional
import numpy as np
import pandas as pd
from scipy.stats import pearsonr
from scipy.stats.mstats import rankdata
def from_file(data_file: str, data_file2: str, learn_options: dict[str, Any]) -> tuple:
if learn_options["V"] == 1: # from Nature Biotech paper
print(f"loading V{learn_options['V']} data")
if learn_options["weighted"] is not None:
raise AssertionError("not supported for V1 data")
_, gene_position, target_genes, x_df, y_df = read_V1_data(
data_file, learn_options
)
learn_options["binary target name"] = "average threshold"
learn_options["rank-transformed target name"] = "average rank"
learn_options["raw target name"] = "average activity"
elif learn_options["V"] == 2: # from Nov 2014, hot off the machines
x_df, _, target_genes, y_df, gene_position = read_V2_data(
data_file, learn_options
)
# check that data is consistent with sgRNA score
xx = x_df["sgRNA Score"].values
yy = y_df["score_drug_gene_rank"].values
rr, _ = pearsonr(xx, yy)
if rr <= 0:
raise AssertionError(
"data processing has gone wrong as correlation with previous "
"predictions is negative"
)
elif (
learn_options["V"] == 3
): # merge of V1 and V2--this is what is used for the final model
# these are relative to the V2 data, and V1 will be made to automatically match
learn_options["binary target name"] = "score_drug_gene_threshold"
learn_options["rank-transformed target name"] = "score_drug_gene_rank"
learn_options["raw target name"] = None
x_df, y_df, gene_position, target_genes = mergeV1_V2(
data_file, data_file2, learn_options
)
elif learn_options["V"] == 4: # merge of V1 and V2 and the Xu et al data
# these are relative to the V2 data, and V1 and Xu et al. will be made
# to automatically match
learn_options["binary target name"] = "score_drug_gene_threshold"
learn_options["rank-transformed target name"] = "score_drug_gene_rank"
learn_options["raw target name"] = None
x_df, y_df, gene_position, target_genes = merge_all(
data_file, data_file2, learn_options
)
elif learn_options["V"] == 5:
raise Exception(
"The from_file() function is attempting to learn using the xu_et_al data. "
"This data is no longer available with Azimuth."
)
# truncate down to 30--some data sets gave us more.
x_df["30mer"] = x_df["30mer"].apply(lambda x: x[0:30])
return x_df, y_df, gene_position, target_genes
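# Hypothetical call sketch (the option values below are illustrative assumptions,
# not the project's defaults; read_V2_data expects the real V2_data.xlsx file):
#
#   learn_options = {"V": 2, "weighted": None, "extra pairs": False,
#                    "all pairs": False, "flipV1target": False}
#   x_df, y_df, gene_position, target_genes = from_file(
#       "V2_data.xlsx", None, set_V2_target_names(learn_options))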
def set_V2_target_names(learn_options: dict) -> dict:
if "binary target name" not in learn_options:
learn_options["binary target name"] = "score_drug_gene_threshold"
if "rank-transformed target name" not in learn_options:
learn_options["rank-transformed target name"] = "score_drug_gene_rank"
learn_options["raw target name"] = "score"
return learn_options
def get_ranks(
y: pd.Series, thresh: float = 0.8, prefix: Optional[str] = None, flip: bool = False
) -> tuple[pd.Series, pd.Series, pd.Series, pd.Series]:
"""
    y should be a DataFrame with a single score column (it is treated like a Series)
    thresh is the threshold at which to call a guide a knock-down or not
    col_name = 'score' is only used for V2 data
    flip should be False for both V1 and V2
\f
Parameters
----------
y : :class:`pd.Series`
thresh : float
prefix : :class:`Optional[str]`
flip : bool
Return
------
y_rank : :class:`pd.Series`
y_rank_raw : :class:`pd.Series`
y_threshold : :class:`pd.Series`
y_quantized : :class:`pd.Series`
"""
if prefix is not None:
prefix = prefix + "_"
else:
prefix = ""
# y_rank = y.apply(ranktrafo)
y_rank = y.apply(rankdata)
y_rank /= y_rank.max()
if flip:
y_rank = (
1.0 - y_rank
        )  # before this line, 1-labels were associated with low ranks; this flips it around
# (hence the y_rank > thresh below)
# we should NOT flip (V2), see README.txt in ./data
y_rank.columns = [prefix + "rank"]
y_threshold = (y_rank > thresh) * 1
y_threshold.columns = [prefix + "threshold"]
# JL: undo the log2 transform (not sure this matters?)
y_rank_raw = (2 ** y).apply(rankdata)
y_rank_raw /= y_rank_raw.max()
if flip:
y_rank_raw = 1.0 - y_rank_raw
y_rank_raw.columns = [prefix + "rank raw"]
if np.any(np.isnan(y_rank)):
raise AssertionError("found NaN in ranks")
y_quantized = y_threshold.copy()
y_quantized.columns = [prefix + "quantized"]
return y_rank, y_rank_raw, y_threshold, y_quantized
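# Hypothetical usage sketch (the scores below are made-up numbers, not real guide
# activity data): get_ranks turns a one-column score frame into normalized ranks
# plus a binary threshold at the requested quantile.
#
#   scores = pd.DataFrame({"score": [0.1, 0.5, 0.9, 0.3]})
#   y_rank, y_rank_raw, y_threshold, y_quantized = get_ranks(
#       scores, thresh=0.8, prefix="score_drug_gene", flip=False)
#   # y_rank lies in (0, 1]; y_threshold is 1 only where the rank exceeds 0.8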
def get_data(
data: pd.DataFrame,
y_names: list[str],
organism: str = "human",
target_gene: str = None,
) -> tuple[pd.DataFrame, pd.DataFrame]:
"""
this is called once for each gene (aggregating across cell types)
y_names are cell types
e.g. call: X_CD13, Y_CD13 = get_data(cd13, y_names=['NB4 CD13', 'TF1 CD13'])
\f
Parameters
----------
data : pd.DataFrame
y_names : List[str]
organism : str = "human"
target_gene : str = None
Return
------
features : :class:`pd.DataFrame`
output : :class:`pd.DataFrame`
"""
outputs = pd.DataFrame()
# generate ranks for each cell type before aggregating to match what is in Doench et al
thresh = 0.8
for y_name in y_names: # for each cell type
y = pd.DataFrame(data[y_name])
        # these thresholds/quantiles are not used:
y_rank, y_rank_raw, y_threshold, _ = get_ranks(y, thresh=thresh, flip=False)
y_rank.columns = [y_name + " rank"]
y_rank_raw.columns = [y_name + " rank raw"]
y_threshold.columns = [y_name + " threshold"]
outputs = pd.concat([outputs, y, y_rank, y_threshold, y_rank_raw], axis=1)
# aggregated rank across cell types
average_activity = pd.DataFrame(outputs[[y_name for y_name in y_names]].mean(1))
average_activity.columns = ["average activity"]
average_rank_from_avg_activity = get_ranks(
average_activity, thresh=thresh, flip=False
)[0]
average_rank_from_avg_activity.columns = ["average_rank_from_avg_activity"]
average_threshold_from_avg_activity = (average_rank_from_avg_activity > thresh) * 1
average_threshold_from_avg_activity.columns = [
"average_threshold_from_avg_activity"
]
average_rank = pd.DataFrame(
outputs[[y_name + " rank" for y_name in y_names]].mean(1)
)
average_rank.columns = ["average rank"]
# higher ranks are better (when flip=False as it should be)
average_threshold = (average_rank > thresh) * 1
average_threshold.columns = ["average threshold"]
# undo the log2 trafo on the reads per million, apply rank trafo right away
average_rank_raw = pd.DataFrame(
outputs[[y_name + " rank raw" for y_name in y_names]].mean(1)
)
average_rank_raw.columns = ["average rank raw"]
outputs = pd.concat(
[
outputs,
average_rank,
average_threshold,
average_activity,
average_rank_raw,
average_rank_from_avg_activity,
average_threshold_from_avg_activity,
],
axis=1,
)
# import pdb; pdb.set_trace()
# sequence-specific computations
# features = featurize_data(data)
# strip out featurization to later
features = pd.DataFrame(data["30mer"])
if organism == "human":
target_gene = y_names[0].split(" ")[1]
outputs["Target gene"] = target_gene
outputs["Organism"] = organism
features["Target gene"] = target_gene
features["Organism"] = organism
features["Strand"] = pd.DataFrame(data["Strand"])
return features, outputs
def combine_organisms(human_data: pd.DataFrame, mouse_data: pd.DataFrame) -> tuple:
# 'Target' is the column name, 'CD13' are some rows in that column
# xs slices through the pandas data frame to return another one
cd13 = human_data.xs("CD13", level="Target", drop_level=False)
# y_names are column names, cd13 is a pd object
x_cd13, y_cd13 = get_data(cd13, y_names=["NB4 CD13", "TF1 CD13"])
cd33 = human_data.xs("CD33", level="Target", drop_level=False)
x_cd33, y_cd33 = get_data(cd33, y_names=["MOLM13 CD33", "TF1 CD33", "NB4 CD33"])
cd15 = human_data.xs("CD15", level="Target", drop_level=False)
x_cd15, y_cd15 = get_data(cd15, y_names=["MOLM13 CD15"])
mouse_x = pd.DataFrame()
mouse_y = pd.DataFrame()
for k in mouse_data.index.levels[1]:
# is k the gene
x_df, y_df = get_data(
mouse_data.xs(k, level="Target", drop_level=False),
["On-target Gene"],
target_gene=k,
organism="mouse",
)
mouse_x = pd.concat([mouse_x, x_df], axis=0)
mouse_y = pd.concat([mouse_y, y_df], axis=0)
x_df = pd.concat([x_cd13, x_cd15, x_cd33, mouse_x], axis=0, sort=True)
y_df = pd.concat([y_cd13, y_cd15, y_cd33, mouse_y], axis=0, sort=True)
return x_df, y_df
def impute_gene_position(gene_position: pd.DataFrame) -> pd.DataFrame:
"""
    Some amino acid cut position and percent peptide values are blank because of stop codons, but
    we still want a number for these, so just set them to 101 as a proxy
    \f
    Parameters
    ----------
    gene_position : :class:`pd.DataFrame`
    Return
    ------
    gene_position : :class:`pd.DataFrame`
"""
gene_position["Percent Peptide"] = gene_position["Percent Peptide"].fillna(101.00)
if "Amino Acid Cut position" in gene_position.columns:
gene_position["Amino Acid Cut position"] = gene_position[
"Amino Acid Cut position"
].fillna(gene_position["Amino Acid Cut position"].mean())
return gene_position
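# Minimal sketch of the imputation above (made-up values, not real annotations):
#
#   gp = pd.DataFrame({"Percent Peptide": [12.5, np.nan],
#                      "Amino Acid Cut position": [100.0, np.nan]})
#   gp = impute_gene_position(gp)
#   # the Percent Peptide NaN becomes 101.0; the Amino Acid Cut position NaN
#   # becomes the column mean (here 100.0)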
def read_V1_data(
data_file: Optional[str] = None,
learn_options: Optional[dict] = None,
aml_file: Optional[str] = None,
) -> tuple:
if data_file is None:
data_file = importlib.resources.files("azimuth").joinpath(
"data", "V1_data.xlsx"
)
with importlib.resources.as_file(data_file) as data_file:
human_data = pd.read_excel(data_file, sheet_name=0, index_col=[0, 1])
mouse_data = pd.read_excel(data_file, sheet_name=1, index_col=[0, 1])
else:
human_data = pd.read_excel(data_file, sheet_name=0, index_col=[0, 1])
mouse_data = pd.read_excel(data_file, sheet_name=1, index_col=[0, 1])
x_df, y_df = combine_organisms(human_data, mouse_data)
# get position within each gene, then join and re-order
# note that 11 missing guides we were told to ignore
annotations = pd.read_csv(aml_file, delimiter="\t", index_col=[0, 4])
annotations.index.names = x_df.index.names
gene_position = pd.merge(
x_df, annotations, how="inner", left_index=True, right_index=True
)
gene_position = impute_gene_position(gene_position)
gene_position = gene_position[
["Amino Acid Cut position", "Nucleotide cut position", "Percent Peptide"]
]
y_df = y_df.loc[gene_position.index]
x_df = x_df.loc[gene_position.index]
y_df[
"test"
] = 1 # for bookkeeping to keep consistent with V2 which uses this for "extra pairs"
target_genes = y_df["Target gene"].unique()
y_df.index.names = ["Sequence", "Target gene"]
if not x_df.index.equals(y_df.index):
raise AssertionError(
"The index of x_df is different from the index of y_df "
"(this can cause inconsistencies/random performance later on)"
)
if learn_options is not None and learn_options["flipV1target"]:
print(
"************************************************************************\n"
"*****************MATCHING DOENCH CODE (DEBUG MODE)**********************\n"
"************************************************************************"
)
        # normally it is: y_df['average threshold'] = y_df['average rank'] > 0.8, where
# 1s are good guides, 0s are not
y_df["average threshold"] = y_df["average rank"] < 0.2 # 1s are bad guides
print("press c to continue")
import pdb
pdb.set_trace()
return annotations, gene_position, target_genes, x_df, y_df
def read_V2_data(
data_file: str = None, learn_options: dict = None, verbose: bool = True
) -> tuple:
if data_file is None:
data_file = importlib.resources.files("azimuth").joinpath(
"data", "V2_data.xlsx"
)
with importlib.resources.as_file(data_file) as df:
data = pd.read_excel(
df,
sheet_name="ResultsFiltered",
skiprows=range(0, 6 + 1),
index_col=[0, 4],
)
else:
data = pd.read_excel(
data_file,
sheet_name="ResultsFiltered",
skiprows=range(0, 6 + 1),
index_col=[0, 4],
)
    # grab data relevant to each of three drugs, which excludes some genes
# note gene MED12 has two drugs, all others have at most one
x_df = pd.DataFrame()
# This comes from the "Pairs" tab in their excel sheet,
# note HPRT/HPRT1 are same thing, and also PLX_2uM/PLcX_2uM
known_pairs = {
"AZD_200nM": ["CCDC101", "MED12", "TADA2B", "TADA1"],
"6TG_2ug/mL": ["HPRT1"],
"PLX_2uM": ["CUL3", "NF1", "NF2", "MED12"],
}
drugs_to_genes = {
"AZD_200nM": ["CCDC101", "MED12", "TADA2B", "TADA1"],
"6TG_2ug/mL": ["HPRT1"],
"PLX_2uM": ["CUL3", "NF1", "NF2", "MED12"],
}
if learn_options is not None:
if learn_options["extra pairs"] or learn_options["all pairs"]:
raise AssertionError(
"extra pairs and all pairs options (in learn_options) can't be "
"active simultaneously."
)
if learn_options["extra pairs"]:
drugs_to_genes["AZD_200nM"].extend(["CUL3", "NF1", "NF2"])
elif learn_options["all pairs"]:
drugs_to_genes["AZD_200nM"].extend(["HPRT1", "CUL3", "NF1", "NF2"])
drugs_to_genes["PLX_2uM"].extend(["HPRT1", "CCDC101", "TADA2B", "TADA1"])
drugs_to_genes["6TG_2ug/mL"].extend(
["CCDC101", "MED12", "TADA2B", "TADA1", "CUL3", "NF1", "NF2"]
)
count = 0
for drug in drugs_to_genes:
genes = drugs_to_genes[drug]
for gene in genes:
xtmp = data.copy().xs(gene, level="Target gene", drop_level=False)
xtmp["drug"] = drug
xtmp["score"] = xtmp[
drug
].copy() # grab the drug results that are relevant for this gene
if gene in known_pairs[drug]:
xtmp["test"] = 1.0
else:
xtmp["test"] = 0.0
count = count + xtmp.shape[0]
x_df = pd.concat([x_df, xtmp], axis=0)
if verbose:
print(
f"Loaded {xtmp.shape[0]} samples for gene {gene} "
f"\ttotal number of samples: {count}"
)
# create new index that includes the drug
x_df = x_df.set_index("drug", append=True)
y_df = pd.DataFrame(x_df.pop("score"))
y_df.columns.names = ["score"]
test_gene = pd.DataFrame(x_df.pop("test"))
target = pd.DataFrame(
x_df.index.get_level_values("Target gene").values,
index=y_df.index,
columns=["Target gene"],
)
y_df = pd.concat((y_df, target, test_gene), axis=1)
target_genes = y_df["Target gene"].unique()
gene_position = x_df[["Percent Peptide", "Amino Acid Cut position"]].copy()
# convert to ranks for each (gene, drug combo)
# flip = True
y_rank = pd.DataFrame()
y_threshold = pd.DataFrame()
y_quant = pd.DataFrame()
for drug in drugs_to_genes:
gene_list = drugs_to_genes[drug]
for gene in gene_list:
ytmp = pd.DataFrame(
y_df.xs((gene, drug), level=["Target gene", "drug"], drop_level=False)[
"score"
]
)
y_ranktmp, _, y_thresholdtmp, y_quanttmp = get_ranks(
ytmp, thresh=0.8, prefix="score_drug_gene", flip=False
)
# np.unique(y_rank.values-y_rank_raw.values)
y_rank = pd.concat((y_rank, y_ranktmp), axis=0)
y_threshold = pd.concat((y_threshold, y_thresholdtmp), axis=0)
y_quant = pd.concat((y_quant, y_quanttmp), axis=0)
yall = pd.concat((y_rank, y_threshold, y_quant), axis=1)
y_df = pd.merge(y_df, yall, how="inner", left_index=True, right_index=True)
# convert also by drug only, irrespective of gene
y_rank = pd.DataFrame()
y_threshold = pd.DataFrame()
y_quant = pd.DataFrame()
for drug in drugs_to_genes:
ytmp = pd.DataFrame(y_df.xs(drug, level="drug", drop_level=False)["score"])
y_ranktmp, _, y_thresholdtmp, y_quanttmp = get_ranks(
ytmp, thresh=0.8, prefix="score_drug", flip=False
)
# np.unique(y_rank.values-y_rank_raw.values)
y_rank = pd.concat((y_rank, y_ranktmp), axis=0)
y_threshold = pd.concat((y_threshold, y_thresholdtmp), axis=0)
y_quant = pd.concat((y_quant, y_quanttmp), axis=0)
yall = pd.concat((y_rank, y_threshold, y_quant), axis=1)
y_df = pd.merge(y_df, yall, how="inner", left_index=True, right_index=True)
gene_position = impute_gene_position(gene_position)
if learn_options is not None and learn_options["weighted"] == "variance":
print("computing weights from replicate variance...")
        # compute the variance across replicates so it can be used as a weight
data = pd.read_excel(
data_file,
sheet_name="Normalized",
skiprows=range(0, 6 + 1),
index_col=[0, 4],
)
data.index.names = ["Sequence", "Target gene"]
experiments = {
"AZD_200nM": ["Deep 25", "Deep 27", "Deep 29 ", "Deep 31"],
"6TG_2ug/mL": ["Deep 33", "Deep 35", "Deep 37", "Deep 39"],
"PLX_2uM": ["Deep 49", "Deep 51", "Deep 53", "Deep 55"],
}
variance = None
for drug in drugs_to_genes:
data_tmp = data.iloc[
data.index.get_level_values("Target gene").isin(drugs_to_genes[drug])
][experiments[drug]]
data_tmp["drug"] = drug
data_tmp = data_tmp.set_index("drug", append=True)
data_tmp["variance"] = np.var(data_tmp.values, axis=1)
if variance is None:
variance = data_tmp["variance"].copy()
else:
variance = pd.concat((variance, data_tmp["variance"]), axis=0)
orig_index = y_df.index.copy()
y_df = pd.merge(
y_df, | pd.DataFrame(variance) | pandas.DataFrame |
import pandas as pd
from autumn.tools.db import Database
from autumn.tools.utils.utils import create_date_index
from autumn.settings.constants import COVID_BASE_DATETIME
from .fetch import (
COVID_AU_CSV_PATH,
COVID_LGA_CSV_PATH,
MOBILITY_LGA_PATH,
COVID_VAC_COV_CSV,
COVID_AU_YOUGOV,
COVID_VIDA_VAC_CSV,
COVID_VIDA_POP_CSV,
)
def preprocess_covid_au(input_db: Database):
df = pd.read_csv(COVID_AU_CSV_PATH)
input_db.dump_df("covid_au", df)
df = pd.read_csv(COVID_LGA_CSV_PATH)
df = reshape_to_clusters(df)
input_db.dump_df("covid_dhhs_test", df)
df = | pd.read_csv(COVID_VAC_COV_CSV) | pandas.read_csv |
# pylint: disable=missing-module-docstring
import numpy as np
import pandas as pd
from sklearn.covariance import LedoitWolf
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples
from scipy.linalg import block_diag
from mlfinlab.portfolio_optimization.risk_estimators import RiskEstimators
class NCO:
"""
This class implements the Nested Clustered Optimization (NCO) algorithm, the Convex Optimization Solution (CVO),
the Monte Carlo Optimization Selection (MCOS) algorithm and sample data generating function. It is reproduced with
modification from the following paper: `<NAME> “A Robust Estimator of the Efficient Frontier”,
(2019). <https://papers.ssrn.com/abstract_id=3469961>`_.
"""
def __init__(self):
"""
Initialize
"""
return
@staticmethod
def allocate_cvo(cov, mu_vec=None):
"""
Estimates the Convex Optimization Solution (CVO).
Uses the covariance matrix and the mu - optimal solution.
If mu is the vector of expected values from variables, the result will be
a vector of weights with maximum Sharpe ratio.
If mu is a vector of ones, the result will be a vector of weights with
minimum variance.
:param cov: (np.array) Covariance matrix of the variables.
:param mu_vec: (np.array) Expected value of draws from the variables for maximum Sharpe ratio.
None if outputting the minimum variance portfolio.
:return: (np.array) Weights for optimal allocation.
"""
# Calculating the inverse covariance matrix
inv_cov = np.linalg.inv(cov)
# Generating a vector of size of the inverted covariance matrix
ones = np.ones(shape=(inv_cov.shape[0], 1))
if mu_vec is None: # To output the minimum variance portfolio
mu_vec = ones
# Calculating the analytical solution using CVO - weights
w_cvo = np.dot(inv_cov, mu_vec)
w_cvo /= np.dot(mu_vec.T, w_cvo)
return w_cvo
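    # Worked sketch of the closed-form solution above (toy 2x2 covariance matrix,
    # not real market data): with mu_vec=None the result is the minimum-variance
    # portfolio w = inv(cov) @ ones / (ones.T @ inv(cov) @ ones).
    #
    #   cov = np.array([[0.04, 0.01],
    #                   [0.01, 0.09]])
    #   w = NCO.allocate_cvo(cov)  # static method, no instance needed
    #   # w sums to 1 and overweights the lower-variance first asset (~0.73 vs ~0.27)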
def allocate_nco(self, cov, mu_vec=None, max_num_clusters=None, n_init=10):
"""
Estimates the optimal allocation using the nested clustered optimization (NCO) algorithm.
First, it clusters the covariance matrix into subsets of highly correlated variables.
Second, it computes the optimal allocation for each of the clusters separately.
This allows collapsing of the original covariance matrix into a reduced covariance matrix,
where each cluster is represented by a single variable.
Third, we compute the optimal allocations across the reduced covariance matrix.
Fourth, the final allocations are the dot-product of the intra-cluster (step 2) allocations and
the inter-cluster (step 3) allocations.
For the Convex Optimization Solution (CVO), a mu - optimal solution parameter is needed.
If mu is the vector of expected values from variables, the result will be
a vector of weights with maximum Sharpe ratio.
If mu is a vector of ones (pass None value), the result will be a vector of weights with
minimum variance.
:param cov: (np.array) Covariance matrix of the variables.
:param mu_vec: (np.array) Expected value of draws from the variables for maximum Sharpe ratio.
None if outputting the minimum variance portfolio.
        :param max_num_clusters: (int) Allowed maximum number of clusters. If None then taken as num_elements/2.
:param n_init: (float) Number of time the k-means algorithm will run with different centroid seeds (default 10)
:return: (np.array) Optimal allocation using the NCO algorithm.
"""
# Using pd.DataFrame instead of np.array
cov = | pd.DataFrame(cov) | pandas.DataFrame |
from datetime import datetime, timedelta
import pandas as pd
from rest_framework import viewsets
from rest_framework.decorators import action
from rest_framework.response import Response
from common.dates_interval import get_days_interval
from des.dao import DesSkybotJobResultDao
from des.models import SkybotJobResult
from des.serializers import SkybotJobResultSerializer
class SkybotJobResultViewSet(viewsets.ModelViewSet):
queryset = SkybotJobResult.objects.all()
serializer_class = SkybotJobResultSerializer
filter_fields = ('id', 'job', 'exposure',)
ordering_fields = ('id', 'job', 'exposure', 'positions',
'inside_ccd', 'outside_ccd', 'success', 'execution_time', 'exposure__date_obs')
ordering = ('exposure',)
@action(detail=False)
def nites_executed_by_period(self, request):
"""Retorna todas as datas dentro do periodo, que foram executadas pelo skybot.
Exemplo: http://localhost/api/des/skybot_job_result/nites_executed_by_period/?start=2019-01-01&end=2019-01-31
Args:
start (str): Data Inicial do periodo like 2019-01-01
end (str): Data Final do periodo 2019-01-31
Returns:
[array]: um array com todas as datas do periodo no formato [{date: '2019-01-01', count: 0, executed: 0}]
O atributo executed pode ter 3 valores:
0 - para datas que não tem exposição
1 - para datas que tem exposição mas não foram executadas
2 - para datas que tem exposição e foram executadas.
"""
start = request.query_params.get('start')
end = request.query_params.get('end')
all_dates = get_days_interval(start, end)
        # Check the number of days between start and end.
if len(all_dates) < 7:
dt_start = datetime.strptime(start, '%Y-%m-%d')
dt_end = dt_start + timedelta(days=6)
all_dates = get_days_interval(dt_start.strftime(
"%Y-%m-%d"), dt_end.strftime("%Y-%m-%d"))
df1 = pd.DataFrame()
df1['date'] = all_dates
df1 = df1.set_index('date')
        # add the start and end times to the dates
start = datetime.strptime(
start, '%Y-%m-%d').strftime("%Y-%m-%d 00:00:00")
end = datetime.strptime(end, '%Y-%m-%d').strftime("%Y-%m-%d 23:59:59")
resultset = DesSkybotJobResultDao().count_exec_by_period(start, end)
if len(resultset) > 0:
df2 = pd.DataFrame(resultset)
            # If the date has been executed it gets the value 2, otherwise 1
df2['executed'] = df2['count'].apply(
lambda x: 2 if int(x) > 0 else 1)
else:
df2 = | pd.DataFrame() | pandas.DataFrame |
import logging
import pandas as pd
import modules.game_actions as gm
class VoteCount:
def __init__(self, staff:list, day_start_post:int, bot_cyle:int):
# Initialize empty vote table
self._vote_table = pd.DataFrame(columns=['player', 'public_name', 'voted_by',
'voted_as', 'post_id', 'post_time', 'bot_cycle'])
try:
self._vote_history = | pd.read_csv('vote_history.csv', sep=',') | pandas.read_csv |
import pandas as pd
import subprocess
import logging
import numpy as np
import scipy.ndimage as ndimage
import scipy.interpolate as interp
import scipy.optimize as optim
import shutil
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.colors as colors
import configparser
import collections
from pathlib import Path
from astropy.io import fits
from astropy.modeling import models, fitting
from matplotlib.backends.backend_pdf import PdfPages
import sphere
import sphere.utils as utils
import sphere.utils.imutils as imutils
import sphere.utils.aperture as aperture
import sphere.transmission as transmission
import sphere.toolbox as toolbox
_log = logging.getLogger(__name__)
def get_wavelength_calibration(filter_comb, wave_calib, centers, wave_min, wave_max):
'''
Return the linear wavelength calibration for each IRDIS field
Parameters
----------
filter_comb : str
Filter combination (S_LR or S_MR)
wave_calib : array
Wavelength calibration data computed by esorex recipe
centers : tuple
Center of each field
wave_min : float
Minimal usable wavelength
wave_max : float
Maximal usable wavelength
Returns
-------
wave_lin : array
Array with the linear calibration for each field, as a function
of pixel coordinate
'''
wave_map = np.zeros((2, 1024, 1024))
wave_map[0] = wave_calib[:, 0:1024]
wave_map[1] = wave_calib[:, 1024:]
wave_map[(wave_map < wave_min) | (wave_max < wave_map)] = np.nan
if filter_comb == 'S_LR':
wave_map[:, 630:] = np.nan
wave_map[:, :400] = np.nan
wave_ext = 10
wave_lin = np.zeros((2, 1024))
wave_lin[0] = np.mean(wave_map[0, :, centers[0, 0]-wave_ext:centers[0, 0]+wave_ext], axis=1)
wave_lin[1] = np.mean(wave_map[1, :, centers[1, 0]-wave_ext:centers[1, 0]+wave_ext], axis=1)
return wave_lin
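# Rough usage sketch (synthetic values only -- a real wave_calib map comes out of
# the esorex wavelength-calibration recipe, and the field centers below are
# assumptions for illustration, not the instrument defaults):
#
#   wave_calib = np.random.uniform(0.95, 2.33, (1024, 2048))  # fake wavelength map
#   centers = np.array([[484, 496], [508, 496]])               # assumed field centers
#   wave_lin = get_wavelength_calibration('S_MR', wave_calib, centers,
#                                         wave_min=0.95, wave_max=2.33)
#   # wave_lin has shape (2, 1024): one linear wavelength vector per IRDIS field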
class SpectroReduction(object):
'''
SPHERE/IRDIS long-slit spectroscopy reduction class. It handles
both the low and medium resolution modes (LRS, MRS)
'''
##################################################
# Class variables
##################################################
# specify for each recipe which other recipes need to have been executed before
recipe_requirements = collections.OrderedDict([
('sort_files', []),
('sort_frames', ['sort_files']),
('check_files_association', ['sort_files']),
('sph_ird_cal_dark', ['sort_files']),
('sph_ird_cal_detector_flat', ['sort_files']),
('sph_ird_cal_wave', ['sort_files', 'sph_ird_cal_detector_flat']),
('sph_ird_preprocess_science', ['sort_files', 'sort_frames', 'sph_ird_cal_dark',
'sph_ird_cal_detector_flat']),
('sph_ird_star_center', ['sort_files', 'sort_frames', 'sph_ird_cal_wave']),
('sph_ird_wavelength_recalibration', ['sort_files', 'sort_frames', 'sph_ird_cal_wave']),
('sph_ird_combine_data', ['sort_files', 'sort_frames', 'sph_ird_preprocess_science']),
('sph_ird_clean', [])
])
##################################################
# Constructor
##################################################
def __new__(cls, path, log_level='info', sphere_handler=None):
'''Custom instantiation for the class and initialization for the
instances
        The customized instantiation makes it possible to check that the
provided path is a valid reduction path. If not, None will be
returned for the reduction being created. Otherwise, an
instance is created and returned at the end.
Parameters
----------
path : str
Path to the directory containing the dataset
level : {'debug', 'info', 'warning', 'error', 'critical'}
The log level of the handler
sphere_handler : log handler
Higher-level SPHERE.Dataset log handler
'''
#
# make sure we are dealing with a proper reduction directory
#
# init path
path = Path(path).expanduser().resolve()
# zeroth-order reduction validation
raw = path / 'raw'
if not raw.exists():
_log.error('No raw/ subdirectory. {0} is not a valid reduction path'.format(path))
return None
else:
# it's all good: create instance!
reduction = super(SpectroReduction, cls).__new__(cls)
#
# basic init
#
# init path
reduction._path = utils.ReductionPath(path)
# instrument and mode
reduction._instrument = 'IRDIS'
reduction._mode = 'Unknown'
#
# logging
#
logger = logging.getLogger(str(path))
logger.setLevel(log_level.upper())
if logger.hasHandlers():
for hdlr in logger.handlers:
logger.removeHandler(hdlr)
handler = logging.FileHandler(reduction._path.products / 'reduction.log', mode='w', encoding='utf-8')
formatter = logging.Formatter('%(asctime)s\t%(levelname)8s\t%(message)s')
formatter.default_msec_format = '%s.%03d'
handler.setFormatter(formatter)
logger.addHandler(handler)
if sphere_handler:
logger.addHandler(sphere_handler)
reduction._logger = logger
reduction._logger.info('Creating IRDIS spectroscopy reduction at path {}'.format(path))
#
# configuration
#
configfile = f'{Path(sphere.__file__).parent}/instruments/{reduction._instrument}.ini'
config = configparser.ConfigParser()
reduction._logger.debug('> read configuration')
config.read(configfile)
# instrument
reduction._pixel = float(config.get('instrument', 'pixel'))
reduction._nwave = -1
# calibration
reduction._wave_cal_lasers = np.array(eval(config.get('calibration', 'wave_cal_lasers')))
# spectro calibration
reduction._default_center_lrs = np.array(eval(config.get('calibration-spectro', 'default_center_lrs')))
reduction._wave_min_lrs = eval(config.get('calibration-spectro', 'wave_min_lrs'))
reduction._wave_max_lrs = eval(config.get('calibration-spectro', 'wave_max_lrs'))
reduction._default_center_mrs = np.array(eval(config.get('calibration-spectro', 'default_center_mrs')))
reduction._wave_min_mrs = eval(config.get('calibration-spectro', 'wave_min_mrs'))
reduction._wave_max_mrs = eval(config.get('calibration-spectro', 'wave_max_mrs'))
# reduction parameters
reduction._config = {}
for group in ['reduction', 'reduction-spectro']:
items = dict(config.items(group))
reduction._config.update(items)
for key, value in items.items():
try:
val = eval(value)
except NameError:
val = value
reduction._config[key] = val
#
# reduction and recipes status
#
reduction._status = sphere.INIT
reduction._recipes_status = collections.OrderedDict()
for recipe in reduction.recipe_requirements.keys():
reduction._update_recipe_status(recipe, sphere.NOTSET)
# reload any existing data frames
reduction._read_info()
#
# return instance
#
return reduction
##################################################
# Representation
##################################################
def __repr__(self):
return '<SpectroReduction, instrument={}, mode={}, path={}, log={}>'.format(self._instrument, self._mode, self._path, self.loglevel)
def __format__(self):
return self.__repr__()
##################################################
# Properties
##################################################
@property
def loglevel(self):
return logging.getLevelName(self._logger.level)
@loglevel.setter
def loglevel(self, level):
self._logger.setLevel(level.upper())
@property
def instrument(self):
return self._instrument
@property
def pixel(self):
return self._pixel
@property
def nwave(self):
return self._nwave
@property
def path(self):
return self._path
@property
def files_info(self):
return self._files_info
@property
def frames_info(self):
return self._frames_info
@property
def frames_info_preproc(self):
return self._frames_info_preproc
@property
def recipes_status(self):
return self._recipes_status
@property
def status(self):
return self._status
@property
def config(self):
return self._config
@property
def mode(self):
return self._mode
##################################################
# Generic class methods
##################################################
def show_config(self):
'''
Shows the reduction configuration
'''
# dictionary
dico = self.config
# misc parameters
print()
print('{0:<30s}{1}'.format('Parameter', 'Value'))
print('-'*35)
keys = [key for key in dico if key.startswith('misc')]
for key in keys:
print('{0:<30s}{1}'.format(key, dico[key]))
# calibrations
print('-'*35)
keys = [key for key in dico if key.startswith('cal')]
for key in keys:
print('{0:<30s}{1}'.format(key, dico[key]))
# pre-processing
print('-'*35)
keys = [key for key in dico if key.startswith('preproc')]
for key in keys:
print('{0:<30s}{1}'.format(key, dico[key]))
# centring
print('-'*35)
keys = [key for key in dico if key.startswith('center')]
for key in keys:
print('{0:<30s}{1}'.format(key, dico[key]))
# wave
print('-'*35)
keys = [key for key in dico if key.startswith('wave')]
for key in keys:
print('{0:<30s}{1}'.format(key, dico[key]))
# combining
print('-'*35)
keys = [key for key in dico if key.startswith('combine')]
for key in keys:
print('{0:<30s}{1}'.format(key, dico[key]))
# clean
print('-'*35)
keys = [key for key in dico if key.startswith('clean')]
for key in keys:
print('{0:<30s}{1}'.format(key, dico[key]))
print('-'*35)
print()
def init_reduction(self):
'''
Sort files and frames, perform sanity check
'''
self._logger.info('====> Init <====')
self.sort_files()
self.sort_frames()
self.check_files_association()
def create_static_calibrations(self):
'''
Create static calibrations with esorex
'''
self._logger.info('====> Static calibrations <====')
config = self.config
self.sph_ird_cal_dark(silent=config['misc_silent_esorex'])
self.sph_ird_cal_detector_flat(silent=config['misc_silent_esorex'])
self.sph_ird_cal_wave(silent=config['misc_silent_esorex'])
def preprocess_science(self):
'''
Clean and collapse images
'''
self._logger.info('====> Science pre-processing <====')
config = self.config
self.sph_ird_preprocess_science(subtract_background=config['preproc_subtract_background'],
fix_badpix=config['preproc_fix_badpix'],
collapse_science=config['preproc_collapse_science'],
collapse_psf=config['preproc_collapse_psf'],
collapse_center=config['preproc_collapse_center'])
def process_science(self):
'''
Perform star center, combine cubes into final (x,y,time,lambda)
cubes, correct anamorphism and scale the images
'''
self._logger.info('====> Science processing <====')
config = self.config
self.sph_ird_star_center(high_pass_psf=config['center_high_pass_psf'],
high_pass_waffle=config['center_high_pass_waffle'],
box_psf=config['center_box_psf'],
box_waffle=config['center_box_waffle'],
plot=config['misc_plot'])
self.sph_ird_wavelength_recalibration(fit_scaling=config['wave_fit_scaling'],
plot=config['misc_plot'])
self.sph_ird_combine_data(cpix=config['combine_cpix'],
psf_dim=config['combine_psf_dim'],
science_dim=config['combine_science_dim'],
correct_mrs_chromatism=config['combine_correct_mrs_chromatism'],
split_posang=config['combine_split_posang'],
shift_method=config['combine_shift_method'],
manual_center=config['combine_manual_center'],
coarse_centering=config['combine_coarse_centering'])
def clean(self):
'''
Clean the reduction directory, leaving only the raw and products
sub-directory
'''
self._logger.info('====> Clean-up <====')
config = self.config
if config['clean']:
self.sph_ird_clean(delete_raw=config['clean_delete_raw'],
delete_products=config['clean_delete_products'])
def full_reduction(self):
'''
Performs a full reduction of a data set, from the static
calibrations to the final (x,y,time,lambda) cubes
'''
self._logger.info('====> Full reduction <====')
self.init_reduction()
self.create_static_calibrations()
self.preprocess_science()
self.process_science()
self.clean()
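    # Hypothetical top-level usage (the path below is a placeholder; it must
    # contain a raw/ sub-directory with the IRDIS LSS files, otherwise __new__
    # returns None):
    #
    #   reduction = SpectroReduction('/path/to/target/', log_level='info')
    #   if reduction is not None:
    #       reduction.full_reduction()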
##################################################
# Private methods
##################################################
def _read_info(self):
'''
Read the files, calibs and frames information from disk
files_info : dataframe
The data frame with all the information on files
frames_info : dataframe
The data frame with all the information on science frames
frames_info_preproc : dataframe
The data frame with all the information on science frames after pre-processing
This function is not supposed to be called directly by the user.
'''
self._logger.info('Read existing reduction information')
# path
path = self.path
# files info
fname = path.preproc / 'files.csv'
if fname.exists():
self._logger.debug('> read files.csv')
files_info = pd.read_csv(fname, index_col=0)
# convert times
files_info['DATE-OBS'] = pd.to_datetime(files_info['DATE-OBS'], utc=False)
files_info['DATE'] = pd.to_datetime(files_info['DATE'], utc=False)
files_info['DET FRAM UTC'] = pd.to_datetime(files_info['DET FRAM UTC'], utc=False)
# update recipe execution
self._update_recipe_status('sort_files', sphere.SUCCESS)
if np.any(files_info['PRO CATG'] == 'IRD_MASTER_DARK'):
self._update_recipe_status('sph_ird_cal_dark', sphere.SUCCESS)
if np.any(files_info['PRO CATG'] == 'IRD_FLAT_FIELD'):
self._update_recipe_status('sph_ird_cal_detector_flat', sphere.SUCCESS)
if np.any(files_info['PRO CATG'] == 'IRD_WAVECALIB'):
self._update_recipe_status('sph_ird_cal_wave', sphere.SUCCESS)
# update instrument mode
self._mode = files_info.loc[files_info['DPR CATG'] == 'SCIENCE', 'INS1 MODE'][0]
else:
files_info = None
fname = path.preproc / 'frames.csv'
if fname.exists():
self._logger.debug('> read frames.csv')
frames_info = pd.read_csv(fname, index_col=(0, 1))
# convert times
frames_info['DATE-OBS'] = pd.to_datetime(frames_info['DATE-OBS'], utc=False)
frames_info['DATE'] = pd.to_datetime(frames_info['DATE'], utc=False)
frames_info['DET FRAM UTC'] = pd.to_datetime(frames_info['DET FRAM UTC'], utc=False)
frames_info['TIME START'] = | pd.to_datetime(frames_info['TIME START'], utc=False) | pandas.to_datetime |
###############################################################################
# create_EPIC_weather_files.py
# email: <EMAIL>, 24th March, 2015.
#
# Convert downloaded data to EPIC compatible weather files.
###############################################################################
import constants, util, logging, os, pandas, datetime, pdb, multiprocessing
from dateutil.rrule import rrule, DAILY, YEARLY
from dateutil.relativedelta import *
# For each grid cell (y_x) process the output data to create an EPIC weather file
###############################################################################
# NARR_to_EPIC
# Convert NARR text file into a EPIC weather files
#
###############################################################################
def NARR_to_EPIC(vals):
lat,lon = vals
# Output pandas frame into EPIC weather file
out_fl = constants.epic_dly+os.sep+str(lat)+'_'+str(lon)+'.txt'
if not(os.path.isfile(out_fl)):
logging.info(out_fl)
# List all years for which we will create EPIC file
lst_yrs = rrule(YEARLY, dtstart=constants.strt_date, until=constants.end_date)
# Create pandas data frame, fill with 0.0s, for 1st year.
epic_df = pandas.DataFrame(index=pandas.date_range(constants.strt_date,constants.end_date),\
columns=[constants.vars_to_get.keys()])
epic_out = open(out_fl,'w')
# Loop across years
for idx_yr in range(lst_yrs.count()):
cur_strt_date = datetime.date(lst_yrs[idx_yr].year,1,1)
cur_end_date = datetime.date(lst_yrs[idx_yr].year,12,31)
cur_date_range = pandas.date_range(cur_strt_date,cur_end_date)
tmp_df = pandas.DataFrame(index=cur_date_range,columns=[constants.vars_to_get.keys()])
tmp_df.fillna(0.0,inplace=True)
# Loop across variables
for cur_var in constants.vars_to_get.keys():
e_fl = open(constants.data_dir + os.sep + 'Data' + os.sep + cur_var + os.sep + str(lst_yrs[idx_yr].year)+\
os.sep + str(lat) + '_' + str(lon) + '.txt')
epic_vars = filter(None,e_fl.readlines()[0].strip().split("'"))
if cur_var == 'air.2m':
epic_min_tmp = util.chunks(epic_vars,8,True)
epic_max_tmp = util.chunks(epic_vars,8,False)
tmp_df[cur_var] = pandas.Series(epic_min_tmp,index=cur_date_range)
tmp_df[cur_var] = tmp_df[cur_var].map(lambda x:float(x)+constants.K_To_C)
tmp_df['tmax'] = pandas.Series(epic_max_tmp,index=cur_date_range)
tmp_df['tmax'] = tmp_df['tmax'].map(lambda x:float(x)+constants.K_To_C)
tmp_df['tmin'] = tmp_df['air.2m']
else:
tmp_df[cur_var] = pandas.Series(epic_vars,index=cur_date_range)
tmp_df[cur_var] = tmp_df[cur_var].map(lambda x:float(x))
# Get into right units
tmp_df['wnd'] = pandas.Series(tmp_df['uwnd.10m'].astype(float)**2.0+\
tmp_df['vwnd.10m'].astype(float)**2.0,index=tmp_df.index)
tmp_df['wnd'] = tmp_df['wnd']**0.5
tmp_df['rhum.2m'] = tmp_df['rhum.2m'].map(lambda x:float(x)/100.0)
tmp_df['swr_diff'] = | pandas.Series(tmp_df['dswrf']-tmp_df['uswrf.sfc'],index=tmp_df.index) | pandas.Series |
# IMPORT LIBRARIES
import warnings
warnings.filterwarnings("ignore")
import datetime as dt
import pandas as pd
import numpy as np
pd.options.mode.chained_assignment = None
pd.set_option('chained_assignment', None)
import plotly.express as px
import plotly.graph_objects as go
import dash_auth, dash
from dash import dcc
from dash import html
import dash_bootstrap_components as dbc
from dash.dependencies import Input, Output
# START APP
app = dash.Dash(__name__, external_stylesheets = [dbc.themes.LITERA],
meta_tags = [{'name': 'viewport',
'content': 'width=device-width, initial-scale = 1.0'}]
)
app.title = 'MATH 231.4 Dashboard'
server = app.server
# LOAD DATASETS
today = dt.date(2020,5,23).strftime('%Y%m%d')
client = | pd.read_csv('dim_client.csv') | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 20 10:06:16 2018
@author: <NAME>
"""
import matplotlib.pyplot as plt
import pandas as pd
from pandas.plotting import autocorrelation_plot
from pandas import ExcelWriter
import numpy as np
import scipy as sp
###this file is used to calculate local flows for control points##
#reading in cp historical data#
#data starts 10/01/1952
cfs_to_cms = 0.0283168
ALBin = pd.read_excel('CP_historical/ALBANY.xlsx',usecols=[2,3],skiprows=1736+92,skipfooter=3379+274,header=None)
SALin = pd.read_excel('CP_historical/SALEM.xlsx',usecols=[2,3],skiprows=1736+92,skipfooter=3379+274,header=None)
HARin = pd.read_excel('CP_historical/HARRISBURG.xlsx',usecols=[2,3],skiprows=1736+92,skipfooter=3379+274,header=None)
HARshift = pd.read_excel('CP_historical/HARRISBURG.xlsx',usecols=[2,3],skiprows=1735+92,skipfooter=3380+274,header=None) #shifted one day ahead
VIDin = pd.read_excel('CP_historical/VIDA.xlsx',usecols=[2,3],skiprows=1736+92,skipfooter=3379+274,header=None)
JEFin = pd.read_excel('CP_historical/JEFFERSON.xlsx',usecols=[2,3],skiprows=1736+92,skipfooter=3379+274,header=None)
MEHin = pd.read_excel('CP_historical/MEHAMA.xlsx',usecols=[2,3],skiprows=1736+92,skipfooter=3379+274,header=None)
MONin = | pd.read_excel('CP_historical/MONROE.xlsx',usecols=[2,3],skiprows=1736+92,skipfooter=3379+274,header=None) | pandas.read_excel |
import os
import sys
import re
import gzip
import tarfile
import io
import scipy
import collections
import argparse
import pandas as pd
import numpy as np
from scipy.stats import binom
from pandas.api.types import is_string_dtype
from pathlib import Path
import numbers
xls = re.compile("xls")
drop = "series_matrix\.txt\.gz|filelist\.txt|readme|\.bam|\.sam|\.csfasta|\.fa(sta)?|\.f(a|n)a|(big)?wig|\.bed(graph)?|(broad_)?lincs"
drop = re.compile(drop)
gse = re.compile("GSE\d+_")
pv_str = "p[^a-zA-Z]{0,4}val"
pv = re.compile(pv_str)
adj = re.compile("adj|fdr|corr|thresh")
ws = re.compile(" ")
mtabs = re.compile("\w+\t{2,}\w+")
tab = re.compile("\t")
fields = ["Type", "Class", "Conversion", "pi0", "FDR_pval", "hist", "note"]
PValSum = collections.namedtuple("PValSum", fields, defaults=[np.nan] * len(fields))
def raw_pvalues(i):
return bool(pv.search(i.lower()) and not adj.search(i.lower()))
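# Quick illustration of the column-name filter above (these header strings are
# invented examples, not taken from any particular GEO series):
#
#   raw_pvalues("P.Value")    # True  -- raw p-value column
#   raw_pvalues("adj.P.Val")  # False -- adjusted p-values are excluded
#   raw_pvalues("logFC")      # False -- no p-value pattern at all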
def find_header(df, n=20):
head = df.head(n)
idx = 0
for col in head:
s = head[col]
match = s.str.contains(pv_str, na=False)
if any(match):
idx = s.index[match].tolist()[0] + 1
break
if idx == 0:
for index, row in head.iterrows():
if all([isinstance(i, str) for i in row if i is not np.nan]):
idx = index + 1
break
return idx
def csv_helper(input, input_name, csv, verbose=0):
# Get comments and set rows to skip
r = pd.read_csv(csv, sep=None, engine="python", iterator=True, nrows=1000)
comment = None
sep = r._engine.data.dialect.delimiter
columns = r._engine.columns
if isinstance(input, (tarfile.ExFileObject)):
with csv as h:
first_line = h.readline()
elif input_name.endswith("gz") or isinstance(input, (gzip.GzipFile)):
with gzip.open(input) as h:
first_line = h.readline().decode("utf-8").rstrip()
else:
with open(input, "r") as h:
first_line = h.readline().rstrip()
more_tabs_than_sep = len(tab.findall(first_line)) > len(re.findall(sep, first_line))
if re.search("^#", first_line) or more_tabs_than_sep:
comment = "#"
# Get delimiter
r = pd.read_csv(
csv, sep=None, engine="python", iterator=True, skiprows=20, nrows=1000
)
sep = r._engine.data.dialect.delimiter
columns = r._engine.columns
if ws.search(sep):
sep = "\s+"
if mtabs.search(first_line):
sep = "\t+"
# Import file
if isinstance(input, (tarfile.ExFileObject)) and input_name.endswith("gz"):
with gzip.open(input) as h:
df = pd.read_csv(h, sep=sep, comment=comment, encoding="unicode_escape")
else:
df = pd.read_csv(input, sep=sep, comment=comment, encoding="unicode_escape")
# Check and fix column names
# Case of extra level of delimiters in column names
if len(df.columns) > len(columns):
df = pd.read_csv(
input,
header=None,
skiprows=[0],
sep=sep,
comment=comment,
encoding="unicode_escape",
).drop([0])
df.columns = columns
unnamed = ["Unnamed" in i for i in df.columns]
# Case of empty rows before header
if all(unnamed):
idx = find_header(df)
if idx > 0:
df = pd.read_csv(
input, sep=sep, comment=comment, skiprows=idx, encoding="unicode_escape"
)
# Case of anonymous row names
if unnamed[-1] & sum(unnamed) == 1:
if any([pv.search(i) for i in df.columns]):
df.columns = [df.columns[-1]] + list(df.columns[:-1])
if verbose > 1:
print("df after import:\n", df)
return {os.path.basename(input_name): df}
def excel_helper(input, input_name, verbose=0):
tabs = {}
if input_name.endswith("gz") or isinstance(input, (gzip.GzipFile)):
wb = pd.ExcelFile(gzip.open(input))
else:
wb = | pd.ExcelFile(input) | pandas.ExcelFile |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Title: listing_cleaning.py
Description: Split the previewAmenityNames into four separate amenity columns,
calculate Bayesian average rating from original rating data,
merge response rate data from CSV downloaded from insideairbnb.com,
and correct data type of the master dataset.
"""
import numpy as np
import pandas as pd
def amenity():
# to separate four amenities
listings = pd.read_csv('../data_collection/listings_clean_copy.csv', header=0, index_col=0)
amenity = listings['previewAmenityNames']
wifi = []
kitchen = []
heating = []
air_conditioning = []
for index in amenity.index:
if 'Wifi' in amenity.iloc[index]:
wifi.append('1')
else:
wifi.append('0')
if 'Kitchen' in amenity.iloc[index]:
kitchen.append('1')
else:
kitchen.append('0')
if 'Heating' in amenity.iloc[index]:
heating.append('1')
else:
heating.append('0')
if 'Air conditioning' in amenity.iloc[index]:
air_conditioning.append('1')
else:
air_conditioning.append('0')
listings['Wifi'] = wifi
listings['Kitchen'] = kitchen
listings['Heating'] = heating
listings['Air conditioning'] =air_conditioning
listings.to_csv('../data_collection/listings_clean_copy.csv', index = False)
def bayesian_average():
"""
Calculate bayesian average rating using the mean and median of the existing rating data.
"""
listings = pd.read_csv('../data_collection/listings_clean_copy.csv', header=0, index_col=0)
print(listings.keys())
print(listings['reviewsCount'].describe())
listings['bysAvgRating'] = ((listings['avgRating'] * listings['reviewsCount']) + (
np.mean(listings['avgRating']) * np.median(listings['reviewsCount']))) / (
listings['reviewsCount'] + np.median(listings['reviewsCount']))
    print(listings['bysAvgRating'].describe())
listings.to_csv('../data_collection/listings_clean_copy.csv')
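# The calculation above is the usual weighted (pseudo-Bayesian) average,
#   bys = (R * v + C * m) / (v + m),
# with R = listing rating, v = listing review count, C = mean rating over all
# listings and m = median review count. A tiny made-up check:
#   R, v, C, m = 5.0, 2, 4.5, 20
#   (R * v + C * m) / (v + m)  # ~4.55 -- few reviews pull the rating toward the prior mean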
def merge_host_response_data():
"""
    Merge host response information from the insideairbnb.com dataset into the master listings file.
"""
listings_master = pd.read_csv('../data_collection/listings_clean_copy.csv', header=0, index_col=0)
listings_insidebnb = pd.read_csv('../data_collection/listings_insidebnb.csv', header=0)
listings_insidebnb = listings_insidebnb[['id', 'host_response_time', 'host_response_rate', 'host_acceptance_rate']]
listings_master = | pd.merge(listings_master, listings_insidebnb, on='id', how='left') | pandas.merge |
import pandas as pd
import numpy as np
# from pandas.core.tools.datetimes import normalize_date
from pandas._libs import tslib
from backend.robinhood_api import RobinhoodAPI
class RobinhoodData:
"""
Wrapper to download orders and dividends from Robinhood accounts
Downloads two dataframes and saves to datafile
----------
Parameters:
datafile : location of h5 datafile
"""
def __init__(self, datafile):
self.datafile = datafile
def _login(self, user, password):
self.client = RobinhoodAPI()
# try import the module with passwords
try:
_temp = __import__('auth')
self.client.login(_temp.local_user, _temp.local_password)
except:
self.client.login(username=user, password=password)
return self
    # private helper to fetch paginated results (used when getting all orders)
def _fetch_json_by_url(self, url):
return self.client.session.get(url).json()
# deleting sensitive or redundant fields
def _delete_sensitive_fields(self, df):
for col in ['account', 'url', 'id', 'instrument']:
if col in df:
del df[col]
return df
# download orders and fields requiring RB client
def _download_orders(self):
print("Downloading orders from Robinhood")
orders = []
past_orders = self.client.order_history()
orders.extend(past_orders['results'])
while past_orders['next']:
next_url = past_orders['next']
past_orders = self._fetch_json_by_url(next_url)
orders.extend(past_orders['results'])
df = pd.DataFrame(orders)
df['symbol'] = df['instrument'].apply(
self.client.get_symbol_by_instrument)
df.sort_values(by='created_at', inplace=True)
df.reset_index(inplace=True, drop=True)
df_ord = self._delete_sensitive_fields(df)
return df_ord
# download dividends and fields requiring RB client
def _download_dividends(self):
print("Downloading dividends from Robinhood")
dividends = self.client.dividends()
dividends = [x for x in dividends['results']]
df = pd.DataFrame(dividends)
if df.shape[0] > 0:
df['symbol'] = df['instrument'].apply(
self.client.get_symbol_by_instrument)
df.sort_values(by='paid_at', inplace=True)
df.reset_index(inplace=True, drop=True)
df_div = self._delete_sensitive_fields(df)
else:
df_div = pd.DataFrame(columns=['symbol', 'amount', 'position',
'rate', 'paid_at', 'payable_date'])
return df_div
# process orders
def _process_orders(self, df_ord):
# assign to df and reduce the number of fields
df = df_ord.copy()
fields = [
'created_at',
'average_price', 'cumulative_quantity', 'fees',
'symbol', 'side']
df = df[fields]
# convert types
for field in ['average_price', 'cumulative_quantity', 'fees']:
df[field] = pd.to_numeric(df[field])
for field in ['created_at']:
df[field] = pd.to_datetime(df[field])
# add days
df['date'] = df['created_at'].apply(
lambda x: tslib.normalize_date(x))
# rename columns for consistency
df.rename(columns={
'cumulative_quantity': 'current_size'
}, inplace=True)
# quantity accounting for side of transaction for cumsum later
df['signed_size'] = np.where(
df.side == 'buy',
df['current_size'],
-df['current_size'])
df['signed_size'] = df['signed_size'].astype(np.int64)
return df
    # process dividends
def _process_dividends(self, df_div):
df = df_div.copy()
# convert types
for field in ['amount', 'position', 'rate']:
df[field] = pd.to_numeric(df[field])
for field in ['paid_at', 'payable_date']:
df[field] = pd.to_datetime(df[field])
# add days
df['date'] = df['paid_at'].apply(
lambda x: tslib.normalize_date(x))
return df
def _generate_positions(self, df_ord):
"""
Process orders dataframe and generate open and closed positions.
For all open positions close those which were later sold, so that
the cost_basis for open can be calculated correctly. For closed
positions calculate the cost_basis based on the closed open positions.
Note: the olders open positions are first to be closed. The logic here
is to reduce the tax exposure.
-----
Parameters:
- Pre-processed df_ord
Return:
- Two dataframes with open and closed positions correspondingly
"""
# prepare dataframe for open and closed positions
df_open = df_ord[df_ord.side == 'buy'].copy()
df_closed = df_ord[df_ord.side == 'sell'].copy()
# create a new column for today's position size
# TODO: may be redundant - review later
df_open['final_size'] = df_open['current_size']
df_closed['final_size'] = df_closed['current_size']
# main loop
for i_closed, row_closed in df_closed.iterrows():
sell_size = row_closed.final_size
sell_cost_basis = 0
for i_open, row_open in df_open[
(df_open.symbol == row_closed.symbol) &
(df_open.date < row_closed.date)].iterrows():
new_sell_size = sell_size - df_open.loc[i_open, 'final_size']
new_sell_size = 0 if new_sell_size < 0 else new_sell_size
new_open_size = df_open.loc[i_open, 'final_size'] - sell_size
new_open_size = new_open_size if new_open_size > 0 else 0
# updating open positions
df_open.loc[i_open, 'final_size'] = new_open_size
# updating closed positions
df_closed.loc[i_closed, 'final_size'] = new_sell_size
sold_size = sell_size - new_sell_size
sell_cost_basis +=\
df_open.loc[i_open, 'average_price'] * sold_size
sell_size = new_sell_size
# assign a cost_basis to the closed position
df_closed.loc[i_closed, 'current_cost_basis'] = -sell_cost_basis
# calculate cost_basis for open positions
df_open['current_cost_basis'] =\
df_open['current_size'] * df_open['average_price']
df_open['final_cost_basis'] =\
df_open['final_size'] * df_open['average_price']
# calculate capital gains for closed positions
df_closed['realized_gains'] =\
df_closed['current_size'] * df_closed['average_price'] +\
df_closed['current_cost_basis']
df_closed['final_cost_basis'] = 0
return df_open, df_closed
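    # Tiny illustrative run of the matching logic above (fabricated orders, not
    # real account data): a buy of 10 shares followed by a sell of 4 leaves an
    # open lot of 6 and a closed position costed against the oldest open lot.
    #
    #   orders = pd.DataFrame({
    #       'symbol': ['XYZ', 'XYZ'], 'side': ['buy', 'sell'],
    #       'current_size': [10, 4], 'signed_size': [10, -4],
    #       'average_price': [100.0, 110.0], 'fees': [0.0, 0.0],
    #       'date': pd.to_datetime(['2018-01-02', '2018-02-01'])})
    #   df_open, df_closed = RobinhoodData('tmp.h5')._generate_positions(orders)
    #   # df_open.final_size -> 6; df_closed.realized_gains -> 4 * (110 - 100) = 40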
def download_robinhood_data(self, user, password):
self._login(user, password)
df_div = self._process_dividends(self._download_dividends())
df_div.to_hdf(self.datafile, 'dividends')
df_ord = self._process_orders(self._download_orders())
df_ord.to_hdf(self.datafile, 'orders')
df_open, df_closed = self._generate_positions(df_ord)
df_open.to_hdf(self.datafile, 'open')
df_closed.to_hdf(self.datafile, 'closed')
return df_div, df_ord, df_open, df_closed
if __name__ == "__main__":
rd = RobinhoodData('../data/data.h5')
if False:
df_div, df_ord, df_open, df_closed =\
rd.download_robinhood_data(None, None)
df_div = pd.read_hdf('../data/data.h5', 'dividends')
df_ord = pd.read_hdf('../data/data.h5', 'orders')
df_open = | pd.read_hdf('../data/data.h5', 'open') | pandas.read_hdf |
import pywt # 1.0.3
import numpy as np # 1.19.5
import pandas as pd # 0.25.1
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
class MultiscalePCA:
"""
    - Multiscale Principal Component Analysis with Wavelet Decomposition
    - Returns an estimate of X
*Example
import mspca
mymodel = mspca.MultiscalePCA()
x_pred = mymodel.fit_transform(X, wavelet_func='db4', threshold=0.3)
"""
def __init__(self):
self.x_norm = None
self.fit_bool = False
def _split_coef(self, train_data, wavelet_func):
self.w = pywt.Wavelet(wavelet_func)
# Wavelet decomposition
temp_coef = pywt.wavedec(train_data[:, 0], self.w)
# maxlev = pywt.dwt_max_level(len(x_norm), w.dec_len)
self.coef_num = len(temp_coef)
self.x_var_num = train_data.shape[1]
a_coef_list = []
for i in range(1, self.coef_num):
globals()['D{}'.format(i)] = []
for i in range(self.x_var_num):
coeffs = pywt.wavedec(train_data[:, i], self.w)
# Add Approximation Coefficient
a_coef_list.append(coeffs[0])
# Add Detailed Coefficient
for j in range(1, self.coef_num):
tmp = globals()['D{}'.format(j)]
tmp.append(coeffs[j])
globals()['D{}'.format(j)] = tmp
a_df = | pd.DataFrame(a_coef_list) | pandas.DataFrame |
from arche.rules.category_coverage import get_coverage_per_category
from arche.rules.result import Level
from conftest import create_result
import pandas as pd
import pytest
@pytest.mark.parametrize(
"data, tags, expected_messages",
[
(
{"sex": ["male", "female", "male"], "country": ["uk", "uk", "uk"]},
{"category": ["sex", "country"]},
{
Level.INFO: [
(
"2 categories in 'sex'",
None,
None,
| pd.Series({"male": 2, "female": 1}, name="sex") | pandas.Series |
import subprocess
import time
from pathlib import Path
from typing import Dict, List, Optional, Tuple, Union
import altair as alt
import flatlatex
import numpy as np
import pandas as pd
Chart = alt.vegalite.v4.api.Chart
def display_img(img: str) -> None:
"""Display image inside notebook while preventing browser caching
Args:
img: Path to image
"""
import IPython.display as IPd
IPd.display(IPd.HTML('<img src="{}?now={}" / >'.format(img, time.time())))
def rgb2hex(r: int, g: int, b: int) -> str:
"""Convert RGB to HEX"""
return "#{:02x}{:02x}{:02x}".format(r, g, b)
def hex2rgb(hex: str) -> Tuple[int]:
"""Convert HEX to RGB"""
h = hex.lstrip("#")
return tuple(int(h[i:i+2], 16) for i in (0, 2, 4))
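# Round-trip example: rgb2hex(255, 128, 0) == "#ff8000" and
# hex2rgb("#ff8000") == (255, 128, 0)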
def convert_file(
fn: str,
to: str = "png",
dpi: int = 300,
background: Optional[str] = None,
debug: bool = False,
) -> str:
"""Convert files with Inkscape
Assumes that Inkscape can be called from shell with `inkscape`.
Args:
fn: Filename
to: Target format
dpi: Resolution
background: Background color for `--export-background`
debug: Debug mode
Returns:
New filename
"""
fn = Path(fn).absolute()
fn_new = fn.with_suffix(f".{to}")
cmd = f"inkscape --export-filename={str(fn_new)} {str(fn)} --export-dpi={dpi}"
if background is not None:
cmd += " --export-background='{background}'"
if debug:
print(cmd)
else:
cmd += " &> /dev/null"
subprocess.call(cmd, shell=True)
return fn_new
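# Hypothetical call (figure.svg is a placeholder path; Inkscape 1.x must be on the
# PATH, since the command above uses the 1.0+ --export-filename flag):
#
#   png_path = convert_file("figure.svg", to="png", dpi=300)
#   pdf_path = convert_file("figure.svg", to="pdf")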
def save(
chart: Chart,
path: str,
extra_formats: Optional[Union[str, List[str]]] = None,
debug: bool = False,
):
"""Save chart using `altair_saver`
`altair_saver` seems to work best with `.svg` exports. When `extra_formats` is
specified, the saved file (e.g., a `svg`) can be converted using the `convert_file`
function which uses `inkscape` for file conversion (e.g., to save `png` or `pdf`
versions of a chart).
Args:
chart: Chart to save
path: Path to save chart to.
extra_formats: Extra formats to save chart as, uses `convert_file` on saved file
"""
try:
import altair_saver
except ImportError:
print("altair_saver is required")
print("http://github.com/altair-viz/altair_saver/")
quit()
path = str(path)
chart.save(path)
if extra_formats is not None:
formats = (
[extra_formats] if not isinstance(extra_formats, list) else extra_formats
)
for ff in formats:
convert_file(path, to=ff, debug=debug)
def latex2unicode(latex: Union[str, List[str]]) -> Union[str, List[str]]:
"""Converts latex strings to unicode using `flatlatex`
Args:
latex: Latex strings (single string or list of strings)
Returns:
Unicode strings
"""
c = flatlatex.converter()
if type(latex) == str:
return c.convert(latex)
elif type(latex) == list:
return [c.convert(entry) for entry in latex]
else:
raise NotImplementedError
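# Illustrative example (the rendering is approximate and depends on the
# installed flatlatex version): latex2unicode(r"\alpha + \beta_1") -> 'α + β₁'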
def np2df(
samples: Union[np.ndarray, List[np.ndarray]],
field: str = "samples",
labels_samples: Optional[Union[str, List[str]]] = None,
labels_dim: List[str] = None,
drop_dim: Optional[List[str]] = None,
) -> pd.DataFrame:
"""Converts numpy arrays to pandas DataFrame used for plotting with `deneb.pairplot`
Args:
samples: Samples
field: Field for sample identifier
labels_samples: Labels for samples
labels_dim: Labels for dimensions
drop_dim: Dimensions to drop
Returns:
Formatted pandas DataFrame
"""
if not isinstance(samples, list):
samples = [samples]
samples = [s for s in samples]
dim_samples = samples[0].shape[1]
for sample in samples:
assert sample.shape[1] == dim_samples
if labels_samples is None:
labels_samples = [f"sample {i+1}" for i in range(len(samples))]
if not isinstance(labels_samples, list):
labels_samples = [labels_samples]
assert len(labels_samples) == len(samples)
if labels_dim is None:
labels_dim = [f"dim {i+1}" for i in range(dim_samples)]
assert len(labels_dim) == dim_samples
dfs = []
for i, sample in enumerate(samples):
df = pd.DataFrame(sample, columns=labels_dim)
df[field] = labels_samples[i]
dfs.append(df)
df = pd.concat(dfs, ignore_index=True)
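# The snippet ends here in the source; a plausible completion (an assumption,
# not taken from the original) would honour the documented `drop_dim` argument
# and return the frame, e.g.:
# if drop_dim is not None:
#     df = df.drop(columns=drop_dim)
# return df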
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import cantera as ct
import copy
from textwrap import wrap
import scipy.stats as stats
import math
from scipy.stats import multivariate_normal
from matplotlib import style
from mpl_toolkits.mplot3d import Axes3D
from scipy.stats import norm
import matplotlib.mlab as mlab
import matplotlib.cm as cm
import MSI.master_equation.master_equation as meq
import re
import os
import MSI.simulations.instruments.ignition_delay as ig
import MSI.cti_core.cti_processor as pr
import MSI.simulations.instruments.jsr_steadystate as jsr
class Plotting(object):
def __init__(self,S_matrix,
s_matrix,
Y_matrix,
y_matrix,
z_matrix,
X,
sigma,
covarience,
original_covariance,
S_matrix_original,
exp_dict_list_optimized,
exp_dict_list_original,
parsed_yaml_list,
Ydf,
target_value_rate_constant_csv='',
target_value_rate_constant_csv_extra_values = '',
k_target_value_S_matrix = None,
k_target_values='Off',
working_directory='',
sigma_uncertainty_weighted_sensitivity_csv='',
simulation_run=None,
shock_tube_instance = None,
cheby_sensitivity_dict = None,
mapped_to_alpha_full_simulation=None,
optimized_cti_file='',
original_cti_file='',
sigma_ones=False,
T_min=200,
T_max=3000,
P_min=1013.25,
P_max=1.013e+6):
self.S_matrix = S_matrix
self.s_matrix = s_matrix
self.Y_matrix = Y_matrix
self.y_matrix = y_matrix
self.z_matrix = z_matrix
self.X = X
self.sigma = sigma
#self.sigma = sigma
self.covarience=covarience
self.original_covariance=original_covariance
#original
self.S_matrix_original=S_matrix_original
self.exp_dict_list_optimized = exp_dict_list_optimized
self.exp_dict_list_original = exp_dict_list_original
self.parsed_yaml_list = parsed_yaml_list
self.target_value_rate_constant_csv = target_value_rate_constant_csv
self.k_target_value_S_matrix = k_target_value_S_matrix
self.Ydf = Ydf
self.k_target_values=k_target_values
self.target_value_rate_constant_csv_extra_values = target_value_rate_constant_csv_extra_values
self.working_directory = working_directory
self.sigma_uncertainty_weighted_sensitivity_csv = sigma_uncertainty_weighted_sensitivity_csv
self.simulation_run = simulation_run
self.shock_tube_instance = shock_tube_instance
self.cheby_sensitivity_dict=cheby_sensitivity_dict
self.mapped_to_alpha_full_simulation = mapped_to_alpha_full_simulation
self.new_cti=optimized_cti_file
self.nominal_cti=original_cti_file
self.sigma_ones = sigma_ones
self.T_min = T_min
self.T_max = T_max
self.P_min = P_min
self.P_max = P_max
def lengths_of_experimental_data(self):
simulation_lengths_of_experimental_data = []
for i,exp in enumerate(self.exp_dict_list_optimized):
length_of_experimental_data=[]
observable_counter=0
for j,observable in enumerate(exp['mole_fraction_observables'] + exp['concentration_observables'] + exp['ignition_delay_observables']):
if observable == None:
continue
if observable in exp['mole_fraction_observables']:
if re.match('[Ss]hock [Tt]ube',exp['simulation_type']):
length_of_experimental_data.append(exp['experimental_data'][observable_counter]['Time'].shape[0])
observable_counter+=1
elif re.match('[Jj][Ss][Rr]',exp['simulation_type']):
length_of_experimental_data.append(exp['experimental_data'][observable_counter]['Temperature'].shape[0])
observable_counter+=1
elif re.match('[Ss]pecies[- ][Pp]rofile',exp['experiment_type']) and re.match('[Ff]low[ -][Rr]eactor',exp['simulation_type']):
length_of_experimental_data.append(exp['experimental_data'][observable_counter]['Temperature'].shape[0])
observable_counter+=1
if observable in exp['concentration_observables']:
if re.match('[Ss]hock [Tt]ube',exp['simulation_type']):
length_of_experimental_data.append(exp['experimental_data'][observable_counter]['Time'].shape[0])
observable_counter+=1
elif re.match('[Jj][Ss][Rr]',exp['simulation_type']):
length_of_experimental_data.append(exp['experimental_data'][observable_counter]['Temperature'].shape[0])
observable_counter+=1
elif re.match('[Ss]pecies[- ][Pp]rofile',exp['experiment_type']) and re.match('[Ff]low[ -][Rr]eactor',exp['simulation_type']):
length_of_experimental_data.append(exp['experimental_data'][observable_counter]['Temperature'].shape[0])
observable_counter+=1
if observable in exp['ignition_delay_observables']:
if re.match('[Ss]hock [Tt]ube',exp['simulation_type']) and re.match('[iI]gnition[- ][Dd]elay',exp['experiment_type']):
if 'temperature' in list(exp['experimental_data'][observable_counter].columns):
length_of_experimental_data.append(exp['experimental_data'][observable_counter]['temperature'].shape[0])
observable_counter+=1
elif 'pressure' in list(exp['experimental_data'][observable_counter].columns):
length_of_experimental_data.append(exp['experimental_data'][observable_counter]['pressure'].shape[0])
observable_counter+=1
else:
length_of_experimental_data.append(exp['experimental_data'][observable_counter].shape[0])
observable_counter+=1
elif re.match('[Rr][Cc][Mm]',exp['simulation_type']) and re.match('[iI]gnition[- ][Dd]elay',exp['experiment_type']):
if 'temperature' in list(exp['experimental_data'][observable_counter].columns):
length_of_experimental_data.append(exp['experimental_data'][observable_counter]['temperature'].shape[0])
observable_counter+=1
elif 'pressure' in list(exp['experimental_data'][observable_counter].columns):
length_of_experimental_data.append(exp['experimental_data'][observable_counter]['pressure'].shape[0])
observable_counter+=1
else:
length_of_experimental_data.append(exp['experimental_data'][observable_counter].shape[0])
observable_counter+=1
if 'perturbed_coef' in exp.keys():
wavelengths = self.parsed_yaml_list[i]['absorbanceCsvWavelengths']
absorbance_wl=0
for k,wl in enumerate(wavelengths):
length_of_experimental_data.append(exp['absorbance_experimental_data'][k]['time'].shape[0])
absorbance_wl+=1
else:
absorbance_wl=0
simulation_lengths_of_experimental_data.append(length_of_experimental_data)
self.simulation_lengths_of_experimental_data=simulation_lengths_of_experimental_data
return observable_counter+absorbance_wl,length_of_experimental_data
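# Hedged summary of the bookkeeping above: each inner list holds the number of
# experimental data points per observable (plus one entry per absorbance
# wavelength) and is later used to slice S, Y and the sigmas into
# per-observable blocks.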
def calculating_sigmas(self,S_matrix,covarience):
sigmas =[[] for x in range(len(self.simulation_lengths_of_experimental_data))]
counter=0
for x in range(len(self.simulation_lengths_of_experimental_data)):
for y in range(len(self.simulation_lengths_of_experimental_data[x])):
temp=[]
for z in np.arange(counter,(self.simulation_lengths_of_experimental_data[x][y]+counter)):
SC = np.dot(S_matrix[z,:],covarience)
sigma = np.dot(SC,np.transpose(S_matrix[z,:]))
test = sigma
sigma = np.sqrt(sigma)
temp.append(sigma)
temp = np.array(temp)
sigmas[x].append(temp)
counter = counter + self.simulation_lengths_of_experimental_data[x][y]
return sigmas, test
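# Hedged numerical sketch of the propagation above (illustrative only): for a
# single sensitivity row s and covariance C, sigma = sqrt(s @ C @ s.T);
# e.g. s = np.array([1.0, 0.5]) with C = np.eye(2) gives sqrt(1.25) ~ 1.118.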
def run_ignition_delay(self,exp,cti,n_of_data_points=10):
p=pr.Processor(cti)
if 'volumeTraceCsv' not in exp['simulation'].fullParsedYamlFile.keys():
if len(exp['simulation'].fullParsedYamlFile['temperatures'])>1:
tempmin=np.min(exp['simulation'].fullParsedYamlFile['temperatures'])
print(tempmin, 'This is the min temp')
tempmax=np.max(exp['simulation'].fullParsedYamlFile['temperatures'])
print(tempmax,'This is the max temp')
total_range=tempmax-tempmin
tempmax=tempmax+0.1*total_range
tempmin=tempmin-0.1*total_range
temprange=np.linspace(tempmin,tempmax,n_of_data_points)
pressures=exp['simulation'].fullParsedYamlFile['pressures']
print(pressures,'These are the pressures')
conds=exp['simulation'].fullParsedYamlFile['conditions_to_run']
print(conds,'These are the conditions')
elif len(exp['simulation'].fullParsedYamlFile['pressures'])>1:
pmin = exp['simulation'].fullParsedYamlFile['pressures']*0.9
pmax = exp['simulation'].fullParsedYamlFile['pressures']*1.1
total_range=pmax-pmin
pmax=pmax+0.1*total_range
pmin=pmin-0.1*total_range
pressures = np.linspace(pmin,pmax,n_of_data_points)
temprange = exp['simulation'].fullParsedYamlFile['temperatures']
conds = exp['simulation'].fullParsedYamlFile['conditions_to_run']
elif len(exp['simulation'].fullParsedYamlFile['conditions_to_run'])>1:
print('Plotting for condition-dependent ignition delay not yet implemented')
ig_delay=ig.ignition_delay_wrapper(pressures=pressures,
temperatures=temprange,
observables=exp['simulation'].fullParsedYamlFile['observables'],
kineticSens=0,
physicalSens=0,
conditions=conds,
thermalBoundary=exp['simulation'].fullParsedYamlFile['thermalBoundary'],
mechanicalBoundary=exp['simulation'].fullParsedYamlFile['mechanicalBoundary'],
processor=p,
cti_path="",
save_physSensHistories=0,
fullParsedYamlFile=exp['simulation'].fullParsedYamlFile,
save_timeHistories=0,
log_file=True,
log_name='log.txt',
timeshift=exp['simulation'].fullParsedYamlFile['time_shift'],
initialTime=exp['simulation'].fullParsedYamlFile['initialTime'],
finalTime=exp['simulation'].fullParsedYamlFile['finalTime'],
target=exp['simulation'].fullParsedYamlFile['target'],
target_type=exp['simulation'].fullParsedYamlFile['target_type'],
n_processors=2)
soln,temp=ig_delay.run()
elif 'volumeTraceCsv' in exp['simulation'].fullParsedYamlFile.keys():
if len(exp['simulation'].fullParsedYamlFile['temperatures'])>1:
tempmin=np.min(exp['simulation'].fullParsedYamlFile['temperatures'])
tempmax=np.max(exp['simulation'].fullParsedYamlFile['temperatures'])
total_range=tempmax-tempmin
tempmax=tempmax+0.1*total_range
tempmin=tempmin-0.1*total_range
temprange=np.linspace(tempmin,tempmax,n_of_data_points)
pressures=exp['simulation'].fullParsedYamlFile['pressures']
conds=exp['simulation'].fullParsedYamlFile['conditions_to_run']
volumeTrace = exp['simulation'].fullParsedYamlFile['volumeTraceCsv']
elif len(exp['simulation'].fullParsedYamlFile['pressures'])>1:
pmin = exp['simulation'].fullParsedYamlFile['pressures']*0.9
pmax = exp['simulation'].fullParsedYamlFile['pressures']*1.1
total_range=pmax-pmin
pmax=pmax+0.1*total_range
pmin=pmin-0.1*total_range
pressures = np.linspace(pmin,pmax,n_of_data_points)
temprange = exp['simulation'].fullParsedYamlFile['temperatures']
conds = exp['simulation'].fullParsedYamlFile['conditions_to_run']
volumeTrace = exp['simulation'].fullParsedYamlFile['volumeTraceCsv']
elif len(exp['simulation'].fullParsedYamlFile['conditions_to_run'])>1:
print('Plotting for condition-dependent ignition delay not yet implemented')
ig_delay=ig.ignition_delay_wrapper(pressures=pressures,
temperatures=temprange,
observables=exp['simulation'].fullParsedYamlFile['observables'],
kineticSens=0,
physicalSens=0,
conditions=conds,
thermalBoundary=exp['simulation'].fullParsedYamlFile['thermalBoundary'],
mechanicalBoundary=exp['simulation'].fullParsedYamlFile['mechanicalBoundary'],
processor=p,
cti_path="",
save_physSensHistories=0,
fullParsedYamlFile=exp['simulation'].fullParsedYamlFile,
save_timeHistories=0,
log_file=True,
log_name='log.txt',
timeshift=exp['simulation'].fullParsedYamlFile['time_shift'],
initialTime=exp['simulation'].fullParsedYamlFile['initialTime'],
finalTime=exp['simulation'].fullParsedYamlFile['finalTime'],
target=exp['simulation'].fullParsedYamlFile['target'],
target_type=exp['simulation'].fullParsedYamlFile['target_type'],
n_processors=2,
volumeTrace=volumeTrace)
soln,temp=ig_delay.run()
#print(soln)
return soln
def run_jsr(self,exp,cti,n_of_data_points=100):
p=pr.Processor(cti)
tempmin=np.min(exp['simulation'].fullParsedYamlFile['temperatures'])
print('Tempmin: '+str(tempmin))
tempmax=np.max(exp['simulation'].fullParsedYamlFile['temperatures'])
print('Tempmax: '+str(tempmax))
if tempmax!=tempmin:
total_range=tempmax-tempmin
tempmax=tempmax+0.1*total_range
tempmin=tempmin-0.1*total_range
elif tempmax==tempmin:
tempmax=tempmax*1.1
tempmin=tempmin*0.9
temprange=np.linspace(tempmin,tempmax,n_of_data_points)
print(temprange)
pressures=exp['simulation'].fullParsedYamlFile['pressure']
conds=exp['simulation'].fullParsedYamlFile['conditions']
jsr1=jsr.JSR_multiTemp_steadystate(volume=exp['simulation'].fullParsedYamlFile['volume'],
pressure=pressures,
temperatures=temprange,
observables=exp['simulation'].fullParsedYamlFile['observables'],
kineticSens=0,
physicalSens=0,
conditions=conds,
thermalBoundary=exp['simulation'].fullParsedYamlFile['thermalBoundary'],
mechanicalBoundary=exp['simulation'].fullParsedYamlFile['mechanicalBoundary'],
processor=p,
save_physSensHistories=0,
save_timeHistories=0,
residence_time=exp['simulation'].fullParsedYamlFile['residence_time'],
moleFractionObservables = exp['simulation'].fullParsedYamlFile['moleFractionObservables'],
concentrationObservables = exp['simulation'].fullParsedYamlFile['concentrationObservables'],
fullParsedYamlFile = exp['simulation'].fullParsedYamlFile)
soln,temp=jsr1.run()
#print(soln)
return soln
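# Hedged note: run_jsr sweeps a padded temperature range (roughly +/-10% of the
# experimental span) at the experiment's pressure and composition, then returns
# the steady-state solution DataFrame used for the smooth model curves below.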
def plotting_observables(self,sigmas_original=[],sigmas_optimized=[],file_identifier='',filetype='.png'):
for i,exp in enumerate(self.exp_dict_list_optimized):
observable_counter=0
for j,observable in enumerate(exp['mole_fraction_observables'] + exp['concentration_observables'] + exp['ignition_delay_observables']):
if observable == None:
continue
plt.figure()
if observable in exp['mole_fraction_observables']:
if re.match('[Ss]hock [Tt]ube',exp['simulation_type']) and re.match('[Ss]pecies[ -][Pp]rofile',exp['experiment_type']):
plt.plot(exp['simulation'].timeHistories[0]['time']*1e3,exp['simulation'].timeHistories[0][observable],'b',label='MSI')
plt.plot(self.exp_dict_list_original[i]['simulation'].timeHistories[0]['time']*1e3,self.exp_dict_list_original[i]['simulation'].timeHistories[0][observable],'r',label= "$\it{A priori}$ model")
plt.plot(exp['experimental_data'][observable_counter]['Time']*1e3,exp['experimental_data'][observable_counter][observable],'o',color='black',label='Experimental Data')
plt.xlabel('Time (ms)')
plt.ylabel('Mole Fraction '+''+str(observable))
plt.title('Experiment_'+str(i+1))
if bool(sigmas_optimized) == True:
high_error_optimized = np.exp(sigmas_optimized[i][observable_counter])
high_error_optimized = np.multiply(high_error_optimized,exp['simulation'].timeHistoryInterpToExperiment[observable].dropna().values)
low_error_optimized = np.exp(sigmas_optimized[i][observable_counter]*-1)
low_error_optimized = np.multiply(low_error_optimized,exp['simulation'].timeHistoryInterpToExperiment[observable].dropna().values)
plt.plot(exp['experimental_data'][observable_counter]['Time']*1e3, high_error_optimized,'b--')
plt.plot(exp['experimental_data'][observable_counter]['Time']*1e3,low_error_optimized,'b--')
# high_error_original = np.exp(sigmas_original[i][observable_counter])
# high_error_original = np.multiply(high_error_original,self.exp_dict_list_original[i]['simulation'].timeHistoryInterpToExperiment[observable].dropna().values)
# low_error_original = np.exp(sigmas_original[i][observable_counter]*-1)
# low_error_original = np.multiply(low_error_original,self.exp_dict_list_original[i]['simulation'].timeHistoryInterpToExperiment[observable].dropna().values)
# plt.plot(exp['experimental_data'][observable_counter]['Time']*1e3, high_error_original,'r--')
# plt.plot(exp['experimental_data'][observable_counter]['Time']*1e3,low_error_original,'r--')
#stub
plt.plot([],'w' ,label= 'T:'+ str(self.exp_dict_list_original[i]['simulation'].temperature))
#plt.plot([],'w', label= 'P:'+ str(self.exp_dict_list_original[i]['simulation'].pressure))
key_list = []
for key in self.exp_dict_list_original[i]['simulation'].conditions.keys():
plt.plot([],'w',label= key+': '+str(self.exp_dict_list_original[i]['simulation'].conditions[key]))
key_list.append(key)
#plt.legend(handlelength=3)
plt.legend(ncol=2)
sp = '_'.join(key_list)
#print(sp)
#plt.savefig(self.working_directory+'/'+'Experiment_'+str(i+1)+'_'+str(observable)+'_'+str(self.exp_dict_list_original[i]['simulation'].temperature)+'K'+'_'+str(self.exp_dict_list_original[i]['simulation'].pressure)+'_'+sp+'_'+'.pdf', bbox_inches='tight')
#stub
plt.savefig(self.working_directory+'/'+'Exp_'+str(i+1)+'_'+str(observable)+'_'+str(self.exp_dict_list_original[i]['simulation'].temperature)+'K_'+sp+'.pdf', bbox_inches='tight')
#plt.savefig(self.working_directory+'/'+'Exp_'+str(i+1)+'_'+str(observable)+'_'+str(self.exp_dict_list_original[i]['simulation'].temperature)+'K_'+sp+'.svg', bbox_inches='tight',transparent=True)
observable_counter+=1
elif re.match('[Jj][Ss][Rr]',exp['simulation_type']):
nominal=self.run_jsr(self.exp_dict_list_original[i],self.nominal_cti)
MSI_model=self.run_jsr(exp,self.new_cti)
plt.plot(MSI_model['temperature'],MSI_model[observable],'b',label='MSI')
plt.plot(nominal['temperature'],nominal[observable],'r',label= "$\it{A priori}$ model")
plt.plot(exp['experimental_data'][observable_counter]['Temperature'],exp['experimental_data'][observable_counter][observable],'o',color='black',label='Experimental Data')
plt.xlabel('Temperature (K)')
plt.ylabel('Mole Fraction '+''+str(observable))
plt.title('Experiment_'+str(i+1))
if bool(sigmas_optimized) == True:
high_error_optimized = np.exp(sigmas_optimized[i][observable_counter])
print(high_error_optimized)
high_error_optimized = np.multiply(high_error_optimized,exp['simulation'].timeHistories[0][observable].dropna().values)
low_error_optimized = np.exp(sigmas_optimized[i][observable_counter]*-1)
low_error_optimized = np.multiply(low_error_optimized,exp['simulation'].timeHistories[0][observable].dropna().values)
#plt.figure()
if len(high_error_optimized)>1 and len(low_error_optimized) > 1:
plt.plot(exp['experimental_data'][observable_counter]['Temperature'], high_error_optimized,'b--')
plt.plot(exp['experimental_data'][observable_counter]['Temperature'],low_error_optimized,'b--')
else:
print(high_error_optimized,observable,exp['simulation'].timeHistories[0][observable].dropna().values)
plt.plot(exp['experimental_data'][observable_counter]['Temperature'], high_error_optimized,'rX')
plt.plot(exp['experimental_data'][observable_counter]['Temperature'],low_error_optimized,'bX')
#high_error_original = np.exp(sigmas_original[i][observable_counter])
# high_error_original = np.multiply(high_error_original,self.exp_dict_list_original[i]['simulation'].timeHistoryInterpToExperiment[observable].dropna().values)
#low_error_original = np.exp(sigmas_original[i][observable_counter]*-1)
#low_error_original = np.multiply(low_error_original,self.exp_dict_list_original[i]['simulation'].timeHistoryInterpToExperiment[observable].dropna().values)
#plt.figure()
# plt.plot(exp['experimental_data'][observable_counter]['Time']*1e3, high_error_original,'r--')
#plt.plot(exp['experimental_data'][observable_counter]['Time']*1e3,low_error_original,'r--')
plt.savefig(os.path.join(self.working_directory,'Experiment_'+str(i+1)+'_'+str(observable)+file_identifier+filetype), bbox_inches='tight',dpi=500)
observable_counter+=1
elif re.match('[Ss]pecies[- ][Pp]rofile',exp['experiment_type']) and re.match('[Ff]low[ -][Rr]eactor',exp['simulation_type']):
plt.plot(exp['simulation'].timeHistories[0]['temperature'],exp['simulation'].timeHistories[0][observable],'b',label='MSI')
plt.plot(self.exp_dict_list_original[i]['simulation'].timeHistories[0]['temperature'],self.exp_dict_list_original[i]['simulation'].timeHistories[0][observable],'r',label= "$\it{A priori}$ model")
plt.plot(exp['experimental_data'][observable_counter]['Temperature'],exp['experimental_data'][observable_counter][observable],'o',color='black',label='Experimental Data')
plt.xlabel('Temperature (K)')
plt.ylabel('Mole Fraction '+''+str(observable))
plt.title('Experiment_'+str(i+1))
if bool(sigmas_optimized) == True:
high_error_optimized = np.exp(sigmas_optimized[i][observable_counter])
high_error_optimized = np.multiply(high_error_optimized,exp['simulation'].timeHistories[0][observable].dropna().values)
low_error_optimized = np.exp(sigmas_optimized[i][observable_counter]*-1)
low_error_optimized = np.multiply(low_error_optimized,exp['simulation'].timeHistories[0][observable].dropna().values)
plt.plot(exp['experimental_data'][observable_counter]['Temperature'], high_error_optimized,'b--')
plt.plot(exp['experimental_data'][observable_counter]['Temperature'],low_error_optimized,'b--')
#high_error_original = np.exp(sigmas_original[i][observable_counter])
#high_error_original = np.multiply(high_error_original,self.exp_dict_list_original[i]['simulation'].timeHistories[0][observable].dropna().values)
#low_error_original = np.exp(sigmas_original[i][observable_counter]*-1)
#low_error_original = np.multiply(low_error_original,self.exp_dict_list_original[i]['simulation'].timeHistoryInterpToExperiment[observable].dropna().values)
#plt.plot(exp['experimental_data'][observable_counter]['Time']*1e3, high_error_original,'r--')
#plt.plot(exp['experimental_data'][observable_counter]['Time']*1e3,low_error_original,'r--')
plt.savefig(self.working_directory+'/'+'Experiment_'+str(i+1)+'_'+str(observable)+'.pdf', bbox_inches='tight',dpi=1000)
if observable in exp['concentration_observables']:
if re.match('[Ss]hock [Tt]ube',exp['simulation_type']) and re.match('[Ss]pecies[ -][Pp]rofile',exp['experiment_type']):
if observable+'_ppm' in exp['experimental_data'][observable_counter].columns:
plt.plot(exp['simulation'].timeHistories[0]['time']*1e3,exp['simulation'].timeHistories[0][observable]*1e6,'b',label='MSI')
plt.plot(self.exp_dict_list_original[i]['simulation'].timeHistories[0]['time']*1e3,self.exp_dict_list_original[i]['simulation'].timeHistories[0][observable]*1e6,'r',label= "$\it{A priori}$ model")
plt.plot(exp['experimental_data'][observable_counter]['Time']*1e3,exp['experimental_data'][observable_counter][observable+'_ppm'],'o',color='black',label='Experimental Data')
plt.xlabel('Time (ms)')
plt.ylabel(str(observable)+ ' '+ 'ppm')
plt.title('Experiment_'+str(i+1))
if bool(sigmas_optimized)==True:
high_error_optimized = np.exp(sigmas_optimized[i][observable_counter])
high_error_optimized = np.multiply(high_error_optimized,exp['simulation'].timeHistoryInterpToExperiment[observable].dropna().values*1e6)
low_error_optimized = np.exp(np.array(sigmas_optimized[i][observable_counter])*-1)
low_error_optimized = np.multiply(low_error_optimized,exp['simulation'].timeHistoryInterpToExperiment[observable].dropna().values*1e6)
plt.plot(exp['experimental_data'][observable_counter]['Time']*1e3, high_error_optimized,'b--')
plt.plot(exp['experimental_data'][observable_counter]['Time']*1e3,low_error_optimized,'b--')
#high_error_original = np.exp(sigmas_original[i][observable_counter])
#high_error_original = np.multiply(high_error_original,self.exp_dict_list_original[i]['simulation'].timeHistoryInterpToExperiment[observable].dropna().values*1e6)
#low_error_original = np.exp(np.array(sigmas_original[i][observable_counter])*-1)
#low_error_original = np.multiply(low_error_original,self.exp_dict_list_original[i]['simulation'].timeHistoryInterpToExperiment[observable].dropna().values*1e6)
#plt.plot(exp['experimental_data'][observable_counter]['Time']*1e3, high_error_original,'r--')
#plt.plot(exp['experimental_data'][observable_counter]['Time']*1e3,low_error_original,'r--')
elif observable+'_mol/cm^3' in exp['experimental_data'][observable_counter].columns:
concentration_optimized = np.true_divide(1,exp['simulation'].timeHistories[0]['temperature'].to_numpy())*exp['simulation'].timeHistories[0]['pressure'].to_numpy()
concentration_optimized *= (1/(8.314e6))*exp['simulation'].timeHistories[0][observable].dropna().to_numpy()
concentration_original = np.true_divide(1,self.exp_dict_list_original[i]['simulation'].timeHistories[0]['temperature'].to_numpy())*self.exp_dict_list_original[i]['simulation'].timeHistories[0]['pressure'].to_numpy()
concentration_original *= (1/(8.314e6))*self.exp_dict_list_original[i]['simulation'].timeHistories[0][observable].dropna().to_numpy()
plt.plot(exp['simulation'].timeHistories[0]['time']*1e3,concentration_optimized,'b',label='MSI')
plt.plot(self.exp_dict_list_original[i]['simulation'].timeHistories[0]['time']*1e3,concentration_original,'r',label= "$\it{A priori}$ model")
plt.plot(exp['experimental_data'][observable_counter]['Time']*1e3,exp['experimental_data'][observable_counter][observable+'_mol/cm^3'],'o',color='black',label='Experimental Data')
plt.xlabel('Time (ms)')
plt.ylabel(r'$\frac{mol}{cm^3}$'+''+str(observable))
plt.title('Experiment_'+str(i+1))
if bool(sigmas_optimized)==True:
concentration_sig = np.true_divide(1,exp['simulation'].pressureAndTemperatureToExperiment[observable_counter]['temperature'].to_numpy())*exp['simulation'].pressureAndTemperatureToExperiment[observable_counter]['pressure'].to_numpy()
concentration_sig *= (1/(8.314e6))*exp['simulation'].timeHistoryInterpToExperiment[observable].dropna().to_numpy()
high_error_optimized = np.exp(sigmas_optimized[i][observable_counter])
high_error_optimized = np.multiply(high_error_optimized,concentration_sig)
low_error_optimized = np.exp(np.array(sigmas_optimized[i][observable_counter])*-1)
low_error_optimized = np.multiply(low_error_optimized,concentration_sig)
plt.plot(exp['experimental_data'][observable_counter]['Time']*1e3, high_error_optimized,'b--')
plt.plot(exp['experimental_data'][observable_counter]['Time']*1e3,low_error_optimized,'b--')
plt.plot([],'w' ,label= 'T:'+ str(self.exp_dict_list_original[i]['simulation'].temperature))
#plt.plot([],'w', label= 'P:'+ str(self.exp_dict_list_original[i]['simulation'].pressure))
key_list = []
for key in self.exp_dict_list_original[i]['simulation'].conditions.keys():
plt.plot([],'w',label= key+': '+str(self.exp_dict_list_original[i]['simulation'].conditions[key]))
key_list.append(key)
#plt.legend(handlelength=3)
plt.legend(ncol=2)
sp = '_'.join(key_list)
#print(sp)
#plt.savefig(self.working_directory+'/'+'Experiment_'+str(i+1)+'_'+str(observable)+'_'+str(self.exp_dict_list_original[i]['simulation'].temperature)+'K'+'_'+str(self.exp_dict_list_original[i]['simulation'].pressure)+'_'+sp+'_'+'.pdf', bbox_inches='tight')
#stub
plt.savefig(self.working_directory+'/'+'Exp_'+str(i+1)+'_'+str(observable)+'_'+str(self.exp_dict_list_original[i]['simulation'].temperature)+'K_'+sp+'.pdf', bbox_inches='tight')
#plt.savefig(self.working_directory+'/'+'Exp_'+str(i+1)+'_'+str(observable)+'_'+str(self.exp_dict_list_original[i]['simulation'].temperature)+'K_'+sp+'.svg', bbox_inches='tight',transparent=True)
observable_counter+=1
elif re.match('[Ff]low [Rr]eactor',exp['simulation_type']) and re.match('[Ss]pecies[ -][Pp]rofile',exp['experiment_type']):
plt.plot(exp['simulation'].timeHistories[0]['initial_temperature'],exp['simulation'].timeHistories[0][observable]*1e6,'b',label='MSI')
plt.plot(self.exp_dict_list_original[i]['simulation'].timeHistories[0]['initial_temperature'],self.exp_dict_list_original[i]['simulation'].timeHistories[0][observable]*1e6,'r',label= "$\it{A priori}$ model")
plt.plot(exp['experimental_data'][observable_counter]['Temperature'],exp['experimental_data'][observable_counter][observable+'_ppm'],'o',color='black',label='Experimental Data')
plt.xlabel('Temperature (K)')
plt.ylabel('ppm '+''+str(observable))
plt.title('Experiment_'+str(i+1))
if bool(sigmas_optimized) == True:
#stub
high_error_optimized = np.exp(sigmas_optimized[i][observable_counter])
high_error_optimized = np.multiply(high_error_optimized,exp['simulation'].timeHistories[0][observable].dropna().values*1e6)
low_error_optimized = np.exp(sigmas_optimized[i][observable_counter]*-1)
low_error_optimized = np.multiply(low_error_optimized,exp['simulation'].timeHistories[0][observable].dropna().values*1e6)
plt.plot(exp['experimental_data'][observable_counter]['Temperature'], high_error_optimized,'b--')
plt.plot(exp['experimental_data'][observable_counter]['Temperature'],low_error_optimized,'b--')
#high_error_original = np.exp(sigmas_original[i][observable_counter])
#high_error_original = np.multiply(high_error_original,self.exp_dict_list_original[i]['simulation'].timeHistories[0][observable].dropna().values)
#low_error_original = np.exp(sigmas_original[i][observable_counter]*-1)
#low_error_original = np.multiply(low_error_original,self.exp_dict_list_original[i]['simulation'].timeHistoryInterpToExperiment[observable].dropna().values)
#plt.plot(exp['experimental_data'][observable_counter]['Time']*1e3, high_error_original,'r--')
#plt.plot(exp['experimental_data'][observable_counter]['Time']*1e3,low_error_original,'r--')
plt.savefig(self.working_directory+'/'+'Experiment_'+str(i+1)+'_'+str(observable)+'.png', bbox_inches='tight',dpi=1000)
observable_counter+=1
elif re.match('[Jj][Ss][Rr]',exp['simulation_type']):
nominal=self.run_jsr(self.exp_dict_list_original[i],self.nominal_cti)
MSI_model=self.run_jsr(exp,self.new_cti)
plt.plot(MSI_model['temperature'],MSI_model[observable]*1e6,'b',label='MSI')
plt.plot(nominal['temperature'],nominal[observable]*1e6,'r',label= "$\it{A priori}$ model")
plt.plot(exp['experimental_data'][observable_counter]['Temperature'],exp['experimental_data'][observable_counter][observable+'_ppm'],'o',color='black',label='Experimental Data')
plt.xlabel('Temperature (K)')
plt.ylabel('ppm '+''+str(observable))
plt.title('Experiment_'+str(i+1))
if bool(sigmas_optimized) == True:
high_error_optimized = np.exp(sigmas_optimized[i][observable_counter])
print(high_error_optimized)
high_error_optimized = np.multiply(high_error_optimized,exp['simulation'].timeHistories[0][observable].dropna().values)
low_error_optimized = np.exp(sigmas_optimized[i][observable_counter]*-1)
low_error_optimized = np.multiply(low_error_optimized,exp['simulation'].timeHistories[0][observable].dropna().values)
#plt.figure()
if len(high_error_optimized)>1 and len(low_error_optimized) > 1:
plt.plot(exp['experimental_data'][observable_counter]['Temperature'],high_error_optimized*1e6,'b--')
plt.plot(exp['experimental_data'][observable_counter]['Temperature'],low_error_optimized*1e6,'b--')
else:
print(high_error_optimized,observable,exp['simulation'].timeHistories[0][observable].dropna().values)
plt.plot(exp['experimental_data'][observable_counter]['Temperature'], high_error_optimized,'rX')
plt.plot(exp['experimental_data'][observable_counter]['Temperature'],low_error_optimized,'bX')
#high_error_original = np.exp(sigmas_original[i][observable_counter])
# high_error_original = np.multiply(high_error_original,self.exp_dict_list_original[i]['simulation'].timeHistoryInterpToExperiment[observable].dropna().values)
#low_error_original = np.exp(sigmas_original[i][observable_counter]*-1)
#low_error_original = np.multiply(low_error_original,self.exp_dict_list_original[i]['simulation'].timeHistoryInterpToExperiment[observable].dropna().values)
#plt.figure()
# plt.plot(exp['experimental_data'][observable_counter]['Time']*1e3, high_error_original,'r--')
#plt.plot(exp['experimental_data'][observable_counter]['Time']*1e3,low_error_original,'r--')
plt.savefig(os.path.join(self.working_directory,'Experiment_'+str(i+1)+'_'+str(observable)+file_identifier+filetype), bbox_inches='tight',dpi=100)
observable_counter+=1
if observable in exp['ignition_delay_observables']:
if re.match('[Ss]hock [Tt]ube',exp['simulation_type']):
if len(exp['simulation'].temperatures)>1:
nominal=self.run_ignition_delay(self.exp_dict_list_original[i], self.nominal_cti)
MSI_model=self.run_ignition_delay(exp, self.new_cti)
#plt.semilogy(1000/MSI_model['temperature'],MSI_model['delay'],'b',label='MSI')
#changed to plotting at nominal temperature
#plt.semilogy(1000/MSI_model['temperature'],MSI_model['delay'],'b',label='MSI')
#a, b = zip(*sorted(zip(1000/exp['experimental_data'][observable_counter]['temperature'],exp['simulation'].timeHistories[0]['delay'].dropna().values)))
a, b = zip(*sorted(zip(1000/np.array(exp['simulation'].temperatures),exp['simulation'].timeHistories[0]['delay'].dropna().values)))
plt.semilogy(a,b,'b',label='MSI')
plt.semilogy(1000/nominal['temperature'],nominal['delay'],'r',label= "$\it{A priori}$ model")
#plt.semilogy(1000/exp['simulation'].timeHistories[0]['temperature'],exp['simulation'].timeHistories[0]['delay'],'b',label='MSI')
#plt.semilogy(1000/self.exp_dict_list_original[i]['simulation'].timeHistories[0]['temperature'],self.exp_dict_list_original[i]['simulation'].timeHistories[0]['delay'],'r',label= "$\it{A priori}$ model")
plt.semilogy(1000/exp['experimental_data'][observable_counter]['temperature'],exp['experimental_data'][observable_counter][observable+'_s'],'o',color='black',label='Experimental Data')
plt.xlabel('1000/T')
plt.ylabel('Time (s)')
plt.title('Experiment_'+str(i+1))
if bool(sigmas_optimized) == True:
high_error_optimized = np.exp(sigmas_optimized[i][observable_counter])
high_error_optimized = np.multiply(high_error_optimized,exp['simulation'].timeHistories[0]['delay'].dropna().values)
low_error_optimized = np.exp(sigmas_optimized[i][observable_counter]*-1)
low_error_optimized = np.multiply(low_error_optimized,exp['simulation'].timeHistories[0]['delay'].dropna().values)
#plt.figure()
#print(exp['simulation'].timeHistories[0]['delay'].dropna().values,'THIS IS IN THE PLOTTER')
#a, b = zip(*sorted(zip(1000/exp['experimental_data'][observable_counter]['temperature'],high_error_optimized)))
a, b = zip(*sorted(zip(1000/np.array(exp['simulation'].temperatures),high_error_optimized)))
plt.semilogy(a,b,'b--')
#a, b = zip(*sorted(zip(1000/exp['experimental_data'][observable_counter]['temperature'],low_error_optimized)))
a, b = zip(*sorted(zip(1000/np.array(exp['simulation'].temperatures),low_error_optimized)))
plt.semilogy(a,b,'b--')
#plt.plot(1000/exp['experimental_data'][observable_counter]['temperature'],exp['simulation'].timeHistories[0]['delay'].dropna().values,'x')
#plt.plot(1000/np.array(exp['simulation'].temperatures),exp['simulation'].timeHistories[0]['delay'].dropna().values,'o')
#high_error_original = np.exp(sigmas_original[i][observable_counter])
# high_error_original = np.multiply(high_error_original,self.exp_dict_list_original[i]['simulation'].timeHistoryInterpToExperiment[observable].dropna().values)
#low_error_original = np.exp(sigmas_original[i][observable_counter]*-1)
#low_error_original = np.multiply(low_error_original,self.exp_dict_list_original[i]['simulation'].timeHistoryInterpToExperiment[observable].dropna().values)
#plt.figure()
# plt.plot(exp['experimental_data'][observable_counter]['Time']*1e3, high_error_original,'r--')
#plt.plot(exp['experimental_data'][observable_counter]['Time']*1e3,low_error_original,'r--')
#plt.plot([],'w', label= 'P:'+ str(self.exp_dict_list_original[i]['simulation'].pressures))
key_list = []
for key in self.exp_dict_list_original[i]['simulation'].fullParsedYamlFile['conditions_to_run'][0].keys():
# ['simulation'].fullParsedYamlFile['conditions_to_run']
plt.plot([],'w',label= key+': '+str(self.exp_dict_list_original[i]['simulation'].fullParsedYamlFile['conditions_to_run'][0][key]))
key_list.append(key)
#plt.legend(handlelength=3)
plt.legend(ncol=2)
sp = '_'.join(key_list)
plt.savefig(os.path.join(self.working_directory,'Experiment_'+str(i+1)+'_'+str(observable)+'.pdf'), bbox_inches='tight',dpi=1000)
plt.savefig(os.path.join(self.working_directory,'Experiment_'+str(i+1)+'_'+str(observable)+'.svg'), bbox_inches='tight',dpi=1000)
observable_counter+=1
elif re.match('[Rr][Cc][Mm]',exp['simulation_type']):
if len(exp['simulation'].temperatures)>1:
plt.semilogy(1000/exp['simulation'].timeHistories[0]['ignition_temperature'],exp['simulation'].timeHistories[0]['delay']-exp['simulation'].timeHistories[0]['end_of_compression_time'],'b',label='MSI')
plt.semilogy(1000/self.exp_dict_list_original[i]['simulation'].timeHistories[0]['ignition_temperature'],self.exp_dict_list_original[i]['simulation'].timeHistories[0]['delay']-self.exp_dict_list_original[i]['simulation'].timeHistories[0]['end_of_compression_time'],'r',label= "$\it{A priori}$ model")
#plt.semilogy(1000/exp['simulation'].timeHistories[0]['temperature'],exp['simulation'].timeHistories[0]['delay'],'b',label='MSI')
#plt.semilogy(1000/self.exp_dict_list_original[i]['simulation'].timeHistories[0]['temperature'],self.exp_dict_list_original[i]['simulation'].timeHistories[0]['delay'],'r',label= "$\it{A priori}$ model")
plt.semilogy(1000/exp['experimental_data'][observable_counter]['temperature'],exp['experimental_data'][observable_counter][observable+'_s'],'o',color='black',label='Experimental Data')
plt.xlabel('1000/T (1000/K)')
plt.ylabel('Time (s)')
plt.title('Experiment_'+str(i+1))
if bool(sigmas_optimized) == True:
high_error_optimized = np.exp(sigmas_optimized[i][observable_counter])
high_error_optimized = np.multiply(high_error_optimized,(exp['simulation'].timeHistories[0]['delay']-exp['simulation'].timeHistories[0]['end_of_compression_time']).dropna().values)
low_error_optimized = np.exp(sigmas_optimized[i][observable_counter]*-1)
low_error_optimized = np.multiply(low_error_optimized,(exp['simulation'].timeHistories[0]['delay']-exp['simulation'].timeHistories[0]['end_of_compression_time']).dropna().values)
#plt.figure()
a, b = zip(*sorted(zip(1000/exp['experimental_data'][observable_counter]['ignition_temperature'],high_error_optimized)))
plt.semilogy(a,b,'b--')
#plt.plot(1000/exp['experimental_data'][observable_counter]['temperature'],low_error_optimized,'b--')
a, b = zip(*sorted(zip(1000/exp['experimental_data'][observable_counter]['ignition_temperature'],low_error_optimized)))
plt.semilogy(a,b,'b--')
#high_error_original = np.exp(sigmas_original[i][observable_counter])
# high_error_original = np.multiply(high_error_original,self.exp_dict_list_original[i]['simulation'].timeHistoryInterpToExperiment[observable].dropna().values)
#low_error_original = np.exp(sigmas_original[i][observable_counter]*-1)
#low_error_original = np.multiply(low_error_original,self.exp_dict_list_original[i]['simulation'].timeHistoryInterpToExperiment[observable].dropna().values)
#plt.figure()
# plt.plot(exp['experimental_data'][observable_counter]['Time']*1e3, high_error_original,'r--')
#plt.plot(exp['experimental_data'][observable_counter]['Time']*1e3,low_error_original,'r--')
plt.savefig(os.path.join(self.working_directory,'Experiment_'+str(i+1)+'_'+str(observable)+'.pdf'), bbox_inches='tight',dpi=1000)
observable_counter+=1
if 'perturbed_coef' in exp.keys():
wavelengths = self.parsed_yaml_list[i]['absorbanceCsvWavelengths']
plt.figure()
for k,wl in enumerate(wavelengths):
plt.plot(exp['absorbance_experimental_data'][k]['time']*1e3,exp['absorbance_experimental_data'][k]['Absorbance_'+str(wl)],'o',color='black',label='Experimental Data')
plt.plot(exp['simulation'].timeHistories[0]['time']*1e3,exp['absorbance_calculated_from_model'][wl],'b',label='MSI')
plt.plot(self.exp_dict_list_original[i]['simulation'].timeHistories[0]['time']*1e3,self.exp_dict_list_original[i]['absorbance_calculated_from_model'][wl],'r',label= "$\it{A priori}$ model")
#plt.plot(exp['absorbance_experimental_data'][k]['time']*1e3,exp['absorbance_experimental_data'][k]['Absorbance_'+str(wl)],'o',color='black',label='Experimental Data')
plt.xlabel('Time (ms)')
plt.ylabel('Absorbance'+''+str(wl))
plt.title('Experiment_'+str(i+1))
if bool(sigmas_optimized)==True:
high_error_optimized = np.exp(sigmas_optimized[i][observable_counter])
high_error_optimized = np.multiply(high_error_optimized,exp['absorbance_model_data'][wl])
low_error_optimized = np.exp(sigmas_optimized[i][observable_counter]*-1)
low_error_optimized = np.multiply(low_error_optimized,exp['absorbance_model_data'][wl])
plt.plot(exp['absorbance_experimental_data'][k]['time']*1e3,high_error_optimized,'b--')
plt.plot(exp['absorbance_experimental_data'][k]['time']*1e3,low_error_optimized,'b--')
high_error_original = np.exp(sigmas_original[i][observable_counter])
high_error_original = np.multiply(high_error_original,self.exp_dict_list_original[i]['absorbance_model_data'][wl])
low_error_original = np.exp(sigmas_original[i][observable_counter]*-1)
low_error_original = np.multiply(low_error_original,self.exp_dict_list_original[i]['absorbance_model_data'][wl])
#plt.plot(exp['absorbance_experimental_data'][k]['time']*1e3,high_error_original,'r--')
#plt.plot(exp['absorbance_experimental_data'][k]['time']*1e3,low_error_original,'r--')
# if bool(sigmas_optimized)==True and i+1 == 11:
# plt.ylim(top=.35)
#start here
key_list=[]
plt.plot([],'w' ,label= 'T:'+ str(self.exp_dict_list_original[i]['simulation'].temperature))
#plt.plot([],'w', label= 'P:'+ str(self.exp_dict_list_original[i]['simulation'].pressure))
for key in self.exp_dict_list_original[i]['simulation'].conditions.keys():
plt.plot([],'w',label= key+': '+str(self.exp_dict_list_original[i]['simulation'].conditions[key]))
key_list.append(key)
#plt.legend(handlelength=3)
plt.legend(ncol=2)
#plt.savefig(self.working_directory+'/'+'Exp_'+str(i+1)+'_'+str(observable)+'_'+str(self.exp_dict_list_original[i]['simulation'].temperature)+'K_'+sp+'.pdf', bbox_inches='tight')
sp = '_'.join(key_list)
plt.savefig(self.working_directory+'/'+'Exp_'+str(i+1)+' '+'Absorb at'+'_'+str(wl)+'_'+str(self.exp_dict_list_original[i]['simulation'].temperature)+'K_'+sp+'.pdf', bbox_inches='tight')
plt.savefig(self.working_directory+'/'+'Exp_'+str(i+1)+' '+'Absorb at'+'_'+str(wl)+'_'+str(self.exp_dict_list_original[i]['simulation'].temperature)+'K_'+sp+'.svg', bbox_inches='tight',transparent=True)
# make function to plot rate constants
def plotting_rate_constants(self,optimized_cti_file='',
original_cti_file='',
initial_temperature=250,
final_temperature=2500,
master_equation_reactions=[]):
gas_optimized = ct.Solution(optimized_cti_file)
gas_original = ct.Solution(original_cti_file)
def unique_list(seq):
checked = []
for e in seq:
if e not in checked:
checked.append(e)
return checked
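# e.g. unique_list([3, 1, 3, 2, 1]) -> [3, 1, 2]; order-preserving, unlike set().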
def target_values_for_S(target_value_csv,
exp_dict_list,
S_matrix,
master_equation_reaction_list = [],
master_equation_sensitivites = {}):
target_value_csv = pd.read_csv(target_value_csv)
target_reactions = target_value_csv['Reaction']
target_temp = target_value_csv['temperature']
target_press = target_value_csv['pressure']
target_k = target_value_csv['k']
reactions_in_cti_file = exp_dict_list[0]['simulation'].processor.solution.reaction_equations()
number_of_reactions_in_cti = len(reactions_in_cti_file)
As = []
Ns = []
Eas = []
def create_empty_nested_reaction_list():
nested_reaction_list = [[] for x in range(len(master_equation_reaction_list))]
for reaction in master_equation_reaction_list:
for i,MP in enumerate(master_equation_sensitivites[reaction]):
nested_reaction_list[master_equation_reaction_list.index(reaction)].append(0)
return nested_reaction_list
def create_tuple_list(array_of_sensitivities):
#stub
tuple_list = []
for ix,iy in np.ndindex(array_of_sensitivities.shape):
tuple_list.append((ix,iy))
return tuple_list
MP_stack = []
target_values_to_stack = []
master_equation_cheby_instance = meq.Master_Equation(T_min=self.T_min,T_max=self.T_max,P_min=self.P_min,P_max=self.P_max)
for i,reaction in enumerate(target_reactions):
if reaction in master_equation_reaction_list:
nested_reaction_list = create_empty_nested_reaction_list()
for j, MP_array in enumerate(master_equation_sensitivites[reaction]):
tuple_list = create_tuple_list(MP_array)
temp = []
counter = 0
for sensitivity in np.nditer(MP_array,order='C'):
k = tuple_list[counter][0]
l= tuple_list[counter][1]
counter +=1
#need to add reduced p and t, and check these units we're using to map
#these might not work
#t_alpha= meq.Master_Equation.chebyshev_specific_poly(self,k,meq.Master_Equation.calc_reduced_T(self,target_temp[i]))
t_alpha= master_equation_cheby_instance.chebyshev_specific_poly(k,master_equation_cheby_instance.calc_reduced_T(target_temp[i]))
if target_press[i] ==0:
target_press_new = 1e-9
else:
target_press_new=target_press[i]
#p_alpha = meq.Master_Equation.chebyshev_specific_poly(self,l,meq.Master_Equation.calc_reduced_P(self,target_press_new*101325))
p_alpha = master_equation_cheby_instance.chebyshev_specific_poly(l,master_equation_cheby_instance.calc_reduced_P(target_press_new*101325))
#these might not work
single_alpha_map = t_alpha*p_alpha*sensitivity
temp.append(single_alpha_map)
temp =sum(temp)
#should there be an = temp here
#nested_reaction_list[master_equation_reaction_list.index(reaction)][j]=temp
nested_reaction_list[master_equation_reaction_list.index(reaction)][j]=temp
temp2 = nested_reaction_list
flat_list = [item for sublist in temp2 for item in sublist]
#print(flat_list)
MP_stack.append(nested_reaction_list)
flat_list = np.array(flat_list)
flat_list = flat_list.reshape((1,flat_list.shape[0]))
target_values_to_stack.append(flat_list)
else:
#this will need to get fixed if we want to handle all reactions as cheby
A_temp = np.zeros((1,number_of_reactions_in_cti-len(master_equation_reaction_list)))
N_temp = np.zeros((1,number_of_reactions_in_cti-len(master_equation_reaction_list)))
Ea_temp = np.zeros((1,number_of_reactions_in_cti-len(master_equation_reaction_list)))
#decide if this mapping is correct
A_temp[0,reactions_in_cti_file.index(reaction)] = 1
N_temp[0,reactions_in_cti_file.index(reaction)] = np.log(target_temp[i])
Ea_temp[0,reactions_in_cti_file.index(reaction)] = (-1/target_temp[i])
As.append(A_temp)
Ns.append(N_temp)
Eas.append(Ea_temp)
A_temp = A_temp.reshape((1,A_temp.shape[1]))
N_temp = N_temp.reshape((1,N_temp.shape[1]))
Ea_temp = Ea_temp.reshape((1,Ea_temp.shape[1]))
target_values_to_stack.append(np.hstack((A_temp,N_temp,Ea_temp)))
# might need to edit this to pass in s? and
S_matrix = S_matrix
shape_s = S_matrix.shape
S_target_values = []
for i,row in enumerate(target_values_to_stack):
if target_reactions[i] in master_equation_reaction_list:
zero_to_append_infront = np.zeros((1,((number_of_reactions_in_cti-len(master_equation_reaction_list))*3)))
zero_to_append_behind = np.zeros((1, shape_s[1] - ((number_of_reactions_in_cti-len(master_equation_reaction_list))*3) - np.shape(row)[1] ))
temp_array = np.hstack((zero_to_append_infront,row,zero_to_append_behind))
S_target_values.append(temp_array)
else:
zero_to_append_behind = np.zeros((1,shape_s[1]-np.shape(row)[1]))
temp_array = np.hstack((row,zero_to_append_behind))
S_target_values.append(temp_array)
S_target_values = np.vstack((S_target_values))
return S_target_values
def sort_rate_constant_target_values(parsed_csv,unique_reactions,gas):
reaction_list_from_mechanism = gas.reaction_equations()
target_value_ks = [[] for reaction in range(len(unique_reactions))]
target_value_temps = [[] for reaction in range(len(unique_reactions))]
reaction_list_from_mechanism = gas.reaction_equations()
for i,reaction in enumerate(parsed_csv['Reaction']):
idx = reaction_list_from_mechanism.index(reaction)
target_value_ks[unique_reactions.index(idx)].append(parsed_csv['k'][i])
target_value_temps[unique_reactions.index(idx)].append(parsed_csv['temperature'][i])
return target_value_temps,target_value_ks
def rate_constant_over_temperature_range_from_cantera(reaction_number,
gas,
initial_temperature=250,
final_temperature=2500,
pressure=1,
conditions = {'H2':2,'O2':1,'N2':4}):
Temp = []
k = []
for temperature in np.arange(initial_temperature,final_temperature,1):
gas.TPX = temperature,pressure*101325,conditions
Temp.append(temperature)
k.append(gas.forward_rate_constants[reaction_number]*1000)
return Temp,k
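# Hedged usage sketch (assumes `gas` holds a valid Cantera mechanism):
# T, k = rate_constant_over_temperature_range_from_cantera(0, gas_optimized)
# The factor of 1000 above rescales Cantera's kmol-based rate constants; the
# exact target units depend on the reaction order (assumption, not from source).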
def calculate_sigmas_for_rate_constants(k_target_value_S_matrix,k_target_values_parsed_csv,unique_reactions,gas,covarience):
reaction_list_from_mechanism = gas.reaction_equations()
sigma_list_for_target_ks = [[] for reaction in range(len(unique_reactions))]
shape = k_target_value_S_matrix.shape
for row in range(shape[0]):
#print(row)
SC = np.dot(k_target_value_S_matrix[row,:],covarience)
sigma_k = np.dot(SC,np.transpose(k_target_value_S_matrix[row,:]))
sigma_k = np.sqrt(sigma_k)
#print(row)
#print(k_target_values_parsed_csv['Reaction'][row])
indx = reaction_list_from_mechanism.index(k_target_values_parsed_csv['Reaction'][row])
sigma_list_for_target_ks[unique_reactions.index(indx)].append(sigma_k)
return sigma_list_for_target_ks
def calculating_target_value_ks_from_cantera_for_sigmas(k_target_values_parsed_csv,gas,unique_reactions):
target_value_ks = [[] for reaction in range(len(unique_reactions))]
target_reactions = k_target_values_parsed_csv['Reaction']
target_temp = k_target_values_parsed_csv['temperature']
target_press = k_target_values_parsed_csv['pressure']
reactions_in_cti_file = gas.reaction_equations()
#print(reactions_in_cti_file)
for i,reaction in enumerate(target_reactions):
if target_press[i] == 0:
pressure = 1e-9
else:
pressure = target_press[i]
gas.TPX = target_temp[i],pressure*101325,{'H2O2':0.003094,'O2':0.000556,'H2O':0.001113,'Ar':0.995237}
reaction_number_in_cti = reactions_in_cti_file.index(reaction)
k = gas.forward_rate_constants[reaction_number_in_cti]
indx = reactions_in_cti_file.index(reaction)
target_value_ks[unique_reactions.index(indx)].append(k*1000)
return target_value_ks
if bool(self.target_value_rate_constant_csv) and self.k_target_values=='On':
S_matrix_k_target_values_extra = target_values_for_S(self.target_value_rate_constant_csv_extra_values,
self.exp_dict_list_optimized,
self.S_matrix,
master_equation_reaction_list = master_equation_reactions,
master_equation_sensitivites=self.cheby_sensitivity_dict)
#paste here
unique_reactions_optimized=[]
unique_reactions_original = []
reaction_list_from_mechanism_original = gas_original.reaction_equations()
reaction_list_from_mechanism = gas_optimized.reaction_equations()
k_target_value_csv_extra = pd.read_csv(self.target_value_rate_constant_csv_extra_values)
k_target_value_csv = pd.read_csv(self.target_value_rate_constant_csv)
for row in range(k_target_value_csv_extra.shape[0]):
unique_reactions_optimized.append(reaction_list_from_mechanism.index(k_target_value_csv_extra['Reaction'][row]))
unique_reactions_original.append(reaction_list_from_mechanism_original.index(k_target_value_csv_extra['Reaction'][row]))
unique_reactions_optimized = unique_list(unique_reactions_optimized)
unique_reactions_original = unique_list(unique_reactions_original)
sigma_list_for_target_ks_optimized = calculate_sigmas_for_rate_constants(S_matrix_k_target_values_extra,k_target_value_csv_extra,unique_reactions_optimized,gas_optimized,self.covarience)
self.sigma_list_for_target_ks_optimized = sigma_list_for_target_ks_optimized
sigma_list_for_target_ks_original = calculate_sigmas_for_rate_constants(S_matrix_k_target_values_extra,k_target_value_csv_extra,unique_reactions_original,gas_original,self.original_covariance)
self.sigma_list_for_target_ks_original = sigma_list_for_target_ks_original
######################
target_value_temps_optimized,target_value_ks_optimized = sort_rate_constant_target_values(k_target_value_csv_extra,unique_reactions_optimized,gas_optimized)
target_value_temps_original,target_value_ks_original = sort_rate_constant_target_values(k_target_value_csv_extra,unique_reactions_original,gas_original)
#############################################
unique_reactions_optimized_for_plotting=[]
unique_reactions_original_for_plotting = []
for row in range(k_target_value_csv.shape[0]):
unique_reactions_optimized_for_plotting.append(reaction_list_from_mechanism.index(k_target_value_csv['Reaction'][row]))
unique_reactions_original_for_plotting.append(reaction_list_from_mechanism_original.index(k_target_value_csv['Reaction'][row]))
unique_reactions_optimized_for_plotting = unique_list(unique_reactions_optimized)
unique_reactions_original_for_plotting = unique_list(unique_reactions_original)
target_value_temps_optimized_for_plotting,target_value_ks_optimized_for_plotting = sort_rate_constant_target_values(k_target_value_csv,unique_reactions_optimized_for_plotting,gas_optimized)
target_value_temps_original_for_plotting,target_value_ks_original_for_plotting = sort_rate_constant_target_values(k_target_value_csv,unique_reactions_original_for_plotting,gas_original)
#############################################
target_value_ks_calculated_with_cantera_optimized = calculating_target_value_ks_from_cantera_for_sigmas(k_target_value_csv_extra,gas_optimized,unique_reactions_optimized)
target_value_ks_calculated_with_cantera_original = calculating_target_value_ks_from_cantera_for_sigmas(k_target_value_csv_extra,gas_original,unique_reactions_original)
for i,reaction in enumerate(unique_reactions_optimized):
plt.figure()
#stub
Temp_optimized,k_optimized = rate_constant_over_temperature_range_from_cantera(reaction,
gas_optimized,
initial_temperature=initial_temperature,
final_temperature=final_temperature,
pressure=1,
conditions={'H2':2,'O2':1,'Ar':4})
plt.semilogy(Temp_optimized,k_optimized,'b')
#calculate sigmas
high_error_optimized = np.exp(np.array(sigma_list_for_target_ks_optimized[i]))
high_error_optimized = np.multiply(high_error_optimized,target_value_ks_calculated_with_cantera_optimized[i])
low_error_optimized = np.exp(np.array(sigma_list_for_target_ks_optimized[i])*-1)
low_error_optimized = np.multiply(low_error_optimized,target_value_ks_calculated_with_cantera_optimized[i])
#plt.semilogy(target_value_temps_optimized[i],high_error_optimized,'b--')
a, b = zip(*sorted(zip(target_value_temps_optimized[i],high_error_optimized)))
plt.semilogy(a,b,'b--')
# print(a,b)
a, b = zip(*sorted(zip(target_value_temps_optimized[i],low_error_optimized)))
plt.semilogy(a,b,'b--')
#stubb
Temp_original,k_original = rate_constant_over_temperature_range_from_cantera(reaction_list_from_mechanism_original.index(reaction_list_from_mechanism[reaction]),
gas_original,
initial_temperature=initial_temperature,
final_temperature=final_temperature,
pressure=1,
conditions={'H2':2,'O2':1,'Ar':4})
plt.semilogy(Temp_original,k_original,'r')
high_error_original = np.exp(sigma_list_for_target_ks_original[unique_reactions_original.index(reaction_list_from_mechanism_original.index(reaction_list_from_mechanism[reaction]))])
high_error_original = np.multiply(high_error_original,target_value_ks_calculated_with_cantera_original[unique_reactions_original.index(reaction_list_from_mechanism_original.index(reaction_list_from_mechanism[reaction]))])
low_error_original = np.exp(np.array(sigma_list_for_target_ks_original[unique_reactions_original.index(reaction_list_from_mechanism_original.index(reaction_list_from_mechanism[reaction]))])*-1)
low_error_original = np.multiply(low_error_original,target_value_ks_calculated_with_cantera_original[unique_reactions_original.index(reaction_list_from_mechanism_original.index(reaction_list_from_mechanism[reaction]))])
a, b = zip(*sorted(zip(target_value_temps_original[unique_reactions_original.index(reaction_list_from_mechanism_original.index(reaction_list_from_mechanism[reaction]))],high_error_original)))
plt.semilogy(a,b,'r--')
a, b = zip(*sorted(zip(target_value_temps_original[unique_reactions_original.index(reaction_list_from_mechanism_original.index(reaction_list_from_mechanism[reaction]))],low_error_original)))
plt.semilogy(a,b,'r--')
#plt.semilogy(target_value_temps_optimized[i],target_value_ks_optimized[i],'o',color='black')
plt.semilogy(target_value_temps_optimized_for_plotting[i],target_value_ks_optimized_for_plotting[i],'o',color='black')
plt.xlabel('Temperature (K)')
plt.ylabel('Kmol/m^3-s')
plt.title(reaction_list_from_mechanism[reaction])
plt.savefig(self.working_directory+'/'+reaction_list_from_mechanism[reaction]+'.pdf', bbox_inches='tight')
plt.savefig(self.working_directory+'/'+reaction_list_from_mechanism[reaction]+'.svg', bbox_inches='tight')
elif bool(self.target_value_rate_constant_csv) and self.k_target_values=='Off':
unique_reactions_optimized=[]
unique_reactions_original = []
reaction_list_from_mechanism_original = gas_original.reaction_equations()
reaction_list_from_mechanism = gas_optimized.reaction_equations()
k_target_value_csv = pd.read_csv(self.target_value_rate_constant_csv)
for row in range(k_target_value_csv.shape[0]):
unique_reactions_optimized.append(reaction_list_from_mechanism.index(k_target_value_csv['Reaction'][row]))
unique_reactions_original.append(reaction_list_from_mechanism_original.index(k_target_value_csv['Reaction'][row]))
unique_reactions_optimized = unique_list(unique_reactions_optimized)
unique_reactions_original = unique_list(unique_reactions_original)
######################
target_value_temps_optimized,target_value_ks_optimized = sort_rate_constant_target_values(k_target_value_csv,unique_reactions_optimized,gas_optimized)
target_value_temps_original,target_value_ks_original = sort_rate_constant_target_values(k_target_value_csv,unique_reactions_original,gas_original)
#############################################
target_value_ks_calculated_with_cantera_optimized = calculating_target_value_ks_from_cantera_for_sigmas(k_target_value_csv,gas_optimized,unique_reactions_optimized)
target_value_ks_calculated_with_cantera_original = calculating_target_value_ks_from_cantera_for_sigmas(k_target_value_csv,gas_original,unique_reactions_original)
for i,reaction in enumerate(unique_reactions_optimized):
plt.figure()
Temp_optimized,k_optimized = rate_constant_over_temperature_range_from_cantera(reaction,
gas_optimized,
initial_temperature=250,
final_temperature=2500,
pressure=1,
conditions={'H2':2,'O2':1,'Ar':4})
plt.semilogy(Temp_optimized,k_optimized,'b')
Temp_original,k_original = rate_constant_over_temperature_range_from_cantera(reaction_list_from_mechanism_original.index(reaction_list_from_mechanism[reaction]),
gas_original,
initial_temperature=250,
final_temperature=2500,
pressure=1,
conditions={'H2':2,'O2':1,'Ar':4})
plt.semilogy(Temp_original,k_original,'r')
plt.semilogy(target_value_temps_optimized[i],target_value_ks_optimized[i],'o',color='black')
plt.xlabel('Temperature (K)')
plt.ylabel('Kmol/m^3-s')
plt.title(reaction_list_from_mechanism[reaction])
plt.savefig(self.working_directory+'/'+reaction_list_from_mechanism[reaction]+'.pdf', bbox_inches='tight')
plt.savefig(self.working_directory+'/'+reaction_list_from_mechanism[reaction]+'.svg', bbox_inches='tight')
def plotting_X_itterations(self,list_of_X_values_to_plot = [], list_of_X_array=[],number_of_iterations=None):
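#plots how the selected entries of the X (parameter update) vector evolve across the optimization iterations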
for value in list_of_X_values_to_plot:
temp = []
for array in list_of_X_array:
temp.append(array[value][0])
plt.figure()
plt.plot(np.arange(0,number_of_iterations,1),temp)
return
def getting_matrix_diag(self,cov_matrix):
diag = cov_matrix.diagonal()
return diag
def Y_matrix_plotter(self,Y_matrix,exp_dict_list_optimized,y_matrix,sigma):
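#for each experiment and observable this slices the matching rows of Y, y, and sigma and plots them (plus Y/sigma) as a four-panel figure versus time, saving one pdf per observable or absorbance wavelength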
#sigmas =[[] for x in range(len(self.simulation_lengths_of_experimental_data))]
counter=0
for x in range(len(self.simulation_lengths_of_experimental_data)):
observable_counter = 0
for y in range(len(self.simulation_lengths_of_experimental_data[x])):
#for z in np.arange(counter,(self.simulation_lengths_of_experimental_data[x][y]+counter)):
plt.figure()
Y_values_to_plot = list(Y_matrix[counter:self.simulation_lengths_of_experimental_data[x][y]+counter,:])
y_values_to_plot = list(y_matrix[counter:self.simulation_lengths_of_experimental_data[x][y]+counter,:])
sigmas_to_plot = list(sigma[counter:self.simulation_lengths_of_experimental_data[x][y]+counter,:])
if 'perturbed_coef' in exp_dict_list_optimized[x].keys():
wavelengths = self.parsed_yaml_list[x]['absorbanceCsvWavelengths'][0]
time = exp_dict_list_optimized[x]['absorbance_experimental_data'][0]['time']
plt.subplot(4, 1, 1)
plt.title('Experiment_'+str(x+1)+'_Wavelength_'+str(wavelengths))
plt.plot(time*1e3,Y_values_to_plot)
plt.tick_params(labelbottom=False)
plt.ylabel('Y_matrix')
plt.subplot(4, 1, 2)
plt.plot(time*1e3,y_values_to_plot)
plt.tick_params(labelbottom=False)
plt.ylabel('y_matrix')
plt.subplot(4, 1, 3)
plt.plot(time*1e3,sigmas_to_plot)
plt.tick_params(labelbottom=False)
plt.ylabel('sigma')
plt.subplot(4, 1, 4)
plt.plot(time*1e3,np.array(Y_values_to_plot)/np.array(sigmas_to_plot))
plt.ylabel('Y/sigma')
plt.xlabel('time')
plt.savefig(self.working_directory+'/'+'Experiment_'+str(x+1)+' '+'Absorbance at'+'_'+str(wavelengths)+'.pdf', bbox_inches='tight')
else:
time = exp_dict_list_optimized[x]['experimental_data'][y]['Time']
plt.subplot(4, 1, 1)
plt.plot(time*1e3,Y_values_to_plot)
plt.tick_params(labelbottom=False)
plt.title('Experiment_'+str(x+1)+'_observable_'+exp_dict_list_optimized[0]['observables'][observable_counter])
plt.ylabel('Y_matrix')
plt.subplot(4, 1, 2)
plt.plot(time*1e3,y_values_to_plot)
plt.tick_params(labelbottom=False)
plt.ylabel('y_matrix')
plt.subplot(4, 1, 3)
plt.plot(time*1e3,sigmas_to_plot)
plt.tick_params(labelbottom=False)
plt.ylabel('sigma')
plt.subplot(4, 1, 4)
plt.plot(time*1e3,np.array(Y_values_to_plot)/np.array(sigmas_to_plot))
plt.ylabel('Y/sigma')
plt.xlabel('time')
plt.savefig('Experiment_'+str(x+1)+'_observable_'+exp_dict_list_optimized[0]['observables'][observable_counter]+'.pdf', bbox_inches='tight')
observable_counter+=1
counter = counter + self.simulation_lengths_of_experimental_data[x][y]
return
def shorten_sigma(self):
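#trims self.sigma down to the parameter block by dropping the rows belonging to the experimental data (and, if rate-constant targets are on, the target-value rows); the result is stored in self.short_sigma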
flat_list = [item for sublist in self.simulation_lengths_of_experimental_data for item in sublist]
length = sum(flat_list)
observables_list = self.Ydf['value'].tolist()[length:]
short_sigma = list(self.sigma)[length:]
#print(flat_list)
if bool(self.target_value_rate_constant_csv) and self.k_target_values=='On':
k_target_value_csv = pd.read_csv(self.target_value_rate_constant_csv)
shape = k_target_value_csv.shape[0]
slc = len(observables_list) - shape
observables_list = observables_list[:slc]
short_sigma = short_sigma[:slc]
short_sigma = np.array(short_sigma)
self.short_sigma = short_sigma
return
def sort_top_uncertainty_weighted_sens(self,top_sensitivity=10):
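#weights each column of the nominal S matrix by its sigma and, for every observable block, keeps the top columns ranked by maximum absolute sensitivity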
S_matrix_copy = copy.deepcopy(self.S_matrix_original)
self.shorten_sigma()
sigma_csv = self.sigma_uncertainty_weighted_sensitivity_csv
if bool(sigma_csv):
df = pd.read_csv(sigma_csv)
Sig = np.array(df['Sigma'])
Sig = Sig.reshape((Sig.shape[0],1))
elif self.sigma_ones==True:
shape = len(self.short_sigma)
Sig = np.ones((shape,1))
else:
Sig = self.short_sigma
#Sig = self.sigma
for pp in range(np.shape(S_matrix_copy)[1]):
S_matrix_copy[:,pp] *=Sig[pp]
sensitivitys =[[] for x in range(len(self.simulation_lengths_of_experimental_data))]
topSensitivities = [[] for x in range(len(self.simulation_lengths_of_experimental_data))]
start=0
stop = 0
for x in range(len(self.simulation_lengths_of_experimental_data)):
for y in range(len(self.simulation_lengths_of_experimental_data[x])):
stop = self.simulation_lengths_of_experimental_data[x][y] + start
temp = S_matrix_copy[start:stop,:]
sort_s= pd.DataFrame(temp).reindex(pd.DataFrame(temp).abs().max().sort_values(ascending=False).index, axis=1)
cc=pd.DataFrame(sort_s).iloc[:,:top_sensitivity]
top_five_reactions=cc.columns.values.tolist()
topSensitivities[x].append(top_five_reactions)
ccn = pd.DataFrame(cc).to_numpy()
sensitivitys[x].append(ccn)
start = start + self.simulation_lengths_of_experimental_data[x][y]
return sensitivitys,topSensitivities
def getting_time_profiles_for_experiments(self, exp_dict_list_optimized):
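#collects the x-axis data for every experiment: time (in ms) or temperature depending on the simulation type and observable, plus absorbance time axes, together with the matching observable names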
time_profiles =[[] for x in range(len(self.simulation_lengths_of_experimental_data))]
observables = [[] for x in range(len(self.simulation_lengths_of_experimental_data))]
for i,exp in enumerate(self.exp_dict_list_optimized):
observable_counter=0
for j,observable in enumerate(exp['mole_fraction_observables'] +
exp['concentration_observables'] +
exp['ignition_delay_observables']):
if observable == None:
continue
if observable in exp['mole_fraction_observables']:
if re.match('[Ss]hock [Tt]ube',exp['simulation_type']):
time_profiles[i].append(exp['experimental_data'][observable_counter]['Time']*1e3)
observables[i].append(observable)
observable_counter+=1
elif re.match('[Jj][Ss][Rr]',exp['simulation_type']):
time_profiles[i].append(exp['experimental_data'][observable_counter]['Temperature'])
observables[i].append(observable)
observable_counter+=1
elif re.match('[Ff]low[ -][Rr]eactor',exp['simulation_type']):
time_profiles[i].append(exp['experimental_data'][observable_counter]['Temperature'])
observables[i].append(observable)
observable_counter+=1
elif observable in exp['concentration_observables']:
if re.match('[Ss]hock [Tt]ube',exp['simulation_type']):
time_profiles[i].append(exp['experimental_data'][observable_counter]['Time']*1e3)
observables[i].append(observable)
observable_counter+=1
elif re.match('[Jj][Ss][Rr]',exp['simulation_type']):
time_profiles[i].append(exp['experimental_data'][observable_counter]['Temperature'])
observables[i].append(observable)
observable_counter+=1
elif re.match('[Ff]low[ -][Rr]eactor',exp['simulation_type']):
time_profiles[i].append(exp['experimental_data'][observable_counter]['Temperature'])
observables[i].append(observable)
observable_counter+=1
elif observable in exp['ignition_delay_observables']:
if re.match('[Ss]hock [Tt]ube',exp['simulation_type']):
time_profiles[i].append(exp['experimental_data'][observable_counter]['temperature'])
observables[i].append(observable)
observable_counter+=1
if 'perturbed_coef' in exp.keys():
wavelengths = self.parsed_yaml_list[i]['absorbanceCsvWavelengths']
for k,wl in enumerate(wavelengths):
time_profiles[i].append(exp['absorbance_experimental_data'][k]['time']*1e3)
observables[i].append('Absorbance_'+str(wl))
self.time_profiles = time_profiles
self.observable_list = observables
return time_profiles
def get_observables_list(self):
#returns the observable/parameter label list; if a sigma csv is provided the labels and sigmas are read from it, otherwise labels come from Ydf and A_i/n_i/Ea_i entries are mapped to their reaction equations
sigma_csv = self.sigma_uncertainty_weighted_sensitivity_csv
gas = ct.Solution(self.new_cti)
reaction_equations = gas.reaction_equations()
if bool(sigma_csv):
df = pd.read_csv(sigma_csv)
Sig = df['Sigma'].values
Sig = np.array(Sig)
Sig = Sig.reshape((Sig.shape[0],1))
observable_list = df['Observable'].tolist()
self.sigma_list = Sig
#print(self.sigma_list)
return observable_list
else:
flat_list = [item for sublist in self.simulation_lengths_of_experimental_data for item in sublist]
length = sum(flat_list)
observables_list = self.Ydf['value'].tolist()[length:]
if bool(self.target_value_rate_constant_csv) and self.k_target_values=='On':
k_target_value_csv = pd.read_csv(self.target_value_rate_constant_csv)
shape = k_target_value_csv.shape[0]
slc = len(observables_list) - shape
observables_list = observables_list[:slc]
#transform observable list
observable_list_transformed = []
for obs in observables_list:
lst = obs.split('_')
if lst[0] =='A':
reaction_indx = int(lst[1])
reaction = reaction_equations[reaction_indx]
observable_list_transformed.append('A_'+reaction)
elif lst[0] =='n':
reaction_indx = int(lst[1])
reaction = reaction_equations[reaction_indx]
observable_list_transformed.append('n_'+reaction)
elif lst[0] =='Ea':
reaction_indx = int(lst[1])
reaction = reaction_equations[reaction_indx]
observable_list_transformed.append('Ea_'+reaction)
else:
observable_list_transformed.append(obs)
return observable_list_transformed
def plotting_uncertainty_weighted_sens(self):
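#plots the top uncertainty-weighted sensitivities for every observable in each experiment against that experiment's time/temperature profile and saves one figure per experiment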
sensitivities,top_sensitivities = self.sort_top_uncertainty_weighted_sens()
observables_list = self.get_observables_list()
if bool(self.sigma_uncertainty_weighted_sensitivity_csv):
sigma_list = self.sigma_list
else:
sigma_list = list(self.short_sigma)
time_profiles = self.getting_time_profiles_for_experiments(self.exp_dict_list_optimized)
list_of_experiment_observables = self.observable_list
def subplot_function(number_of_observables_in_simulation,time_profiles,sensitivities,top_sensitivity_single_exp,observables_list,list_of_experiment_observables,experiment_number):
#plt.figure(figsize=(2,6))
plt.figure()
for plot_number in range(number_of_observables_in_simulation):
for c,top_columns in enumerate(top_sensitivity_single_exp[plot_number]):
plt.subplot(number_of_observables_in_simulation,1,plot_number+1)
if plot_number==0:
plt.title('Experiment_'+str(experiment_number+1))
plt.plot(time_profiles[plot_number],sensitivities[plot_number][:,c],label = observables_list[top_columns] +'_'+str(sigma_list[top_columns]))
plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=1.5)
plt.ylabel(list_of_experiment_observables[plot_number])
bottom, top = plt.ylim()
left, right = plt.xlim()
plt.legend(ncol=5, loc='upper left',bbox_to_anchor=(-.5,-.3))
#plt.legend(ncol=3, loc='upper left',bbox_to_anchor=(1.2,2),fontsize=2)
if self.simulation_run is None:
plt.savefig(self.working_directory+'/'+'Experiment_'+str(experiment_number+1)+'.pdf', bbox_inches='tight')
else:
plt.title('Experiment_'+str(self.simulation_run))
plt.savefig(self.working_directory+'/'+'Experiment_'+str(self.simulation_run)+'.pdf', bbox_inches='tight')
for x in range(len(sensitivities)):
number_of_observables_in_simulation = len(sensitivities[x])
subplot_function(number_of_observables_in_simulation,time_profiles[x],sensitivities[x],top_sensitivities[x],observables_list,list_of_experiment_observables[x],x)
return
def plotting_rate_constants_six_paramter_fit(self,optimized_cti_file='',
original_cti_file='',
initial_temperature=250,
final_temperature=2500,
master_equation_reactions = [],
six_parameter_fit_dict_optimized = {},
six_parameter_fit_dict_nominal = {},
six_parameter_fit_sensitivity_dict = {}):
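#plots optimized (blue) and nominal (red) rate constants over a temperature range, with dashed uncertainty bands and the target-value data points when rate-constant targets are turned on; master-equation reactions are evaluated through their six-parameter fits instead of Cantera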
gas_optimized = ct.Solution(optimized_cti_file)
gas_original = ct.Solution(original_cti_file)
def unique_list(seq):
checked = []
for e in seq:
if e not in checked:
checked.append(e)
return checked
################################################################################
def target_values_for_S_six_parameter_fit(target_value_csv,
exp_dict_list,
S_matrix,
master_equation_reaction_list = [],
six_parameter_fit_sensitivity_dict = {}):
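#builds the rows of the S matrix that correspond to rate-constant target values: master-equation reactions are mapped through the six-parameter-fit sensitivities, all other reactions through their A, n, and Ea columns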
target_value_csv = pd.read_csv(target_value_csv)
target_reactions = target_value_csv['Reaction']
target_temp = target_value_csv['temperature']
target_press = target_value_csv['pressure']
target_k = target_value_csv['k']
reactions_in_cti_file = exp_dict_list[0]['simulation'].processor.solution.reaction_equations()
number_of_reactions_in_cti = len(reactions_in_cti_file)
As = []
Ns = []
Eas = []
Number_of_MP = []
#nested_reaction_list = [[] for x in range(len(master_equation_reaction_list))]
#print(six_parameter_fit_sensitivity_dict.keys())
def create_empty_nested_reaction_list():
nested_reaction_list = [[] for x in range(len(master_equation_reaction_list))]
for reaction in master_equation_reaction_list:
for i,MP in enumerate(six_parameter_fit_sensitivity_dict[reaction]['A']):
nested_reaction_list[master_equation_reaction_list.index(reaction)].append(0)
return nested_reaction_list
MP_stack = []
target_values_to_stack = []
for i,reaction in enumerate(target_reactions):
#temp_array = np.zeros((1,Number_of_MP))
if reaction in master_equation_reaction_list:
nested_reaction_list = create_empty_nested_reaction_list()
for s,sensitivity in enumerate(six_parameter_fit_sensitivity_dict[reaction]['A']):
nested_reaction_list[master_equation_reaction_list.index(reaction)][s] = 1*six_parameter_fit_sensitivity_dict[reaction]['A'][s] + np.log(target_temp[i])*six_parameter_fit_sensitivity_dict[reaction]['n'][s] + (-1000/target_temp[i])*six_parameter_fit_sensitivity_dict[reaction]['Ea'][s] + (-(1000/target_temp[i])**3)*six_parameter_fit_sensitivity_dict[reaction]['c'][s]+ (-(1000/target_temp[i])**-1)*six_parameter_fit_sensitivity_dict[reaction]['d'][s] + (-(1000/target_temp[i])**-3)*six_parameter_fit_sensitivity_dict[reaction]['f'][s]
#nested_reaction_list[master_equation_reaction_list.index(reaction)][s] = 1*six_parameter_fit_sensitivity_dict[reaction]['A'][s] + np.log(target_temp[i])*six_parameter_fit_sensitivity_dict[reaction]['n'][s] + (-1/target_temp[i])*six_parameter_fit_sensitivity_dict[reaction]['Ea'][s] + (-(1000/target_temp[i])**3)*six_parameter_fit_sensitivity_dict[reaction]['c'][s]+ (-(1000/target_temp[i])**-1)*six_parameter_fit_sensitivity_dict[reaction]['d'][s]*(1000*4.184)**-1 + (-(1/target_temp[i])**-3)*six_parameter_fit_sensitivity_dict[reaction]['f'][s]*(1000*4.184)**-3
temp = nested_reaction_list
flat_list = [item for sublist in temp for item in sublist]
MP_stack.append(nested_reaction_list)
flat_list = np.array(flat_list)
flat_list = flat_list.reshape((1,flat_list.shape[0]))
target_values_to_stack.append(flat_list)
else:
A_temp = np.zeros((1,number_of_reactions_in_cti-len(master_equation_reaction_list)))
N_temp = np.zeros((1,number_of_reactions_in_cti-len(master_equation_reaction_list)))
Ea_temp = np.zeros((1,number_of_reactions_in_cti-len(master_equation_reaction_list)))
#maps this reaction's A, n, and Ea entries by its index in the full cti reaction list (assumes that ordering is consistent with the A/n/Ea column blocks)
A_temp[0,reactions_in_cti_file.index(reaction)] = 1
N_temp [0,reactions_in_cti_file.index(reaction)] = np.log(target_temp[i])
Ea_temp[0,reactions_in_cti_file.index(reaction)] = (-1/target_temp[i])
As.append(A_temp)
Ns.append(N_temp)
Eas.append(Ea_temp)
A_temp = A_temp.reshape((1,A_temp.shape[1]))
N_temp = N_temp.reshape((1,N_temp.shape[1]))
Ea_temp = Ea_temp.reshape((1,Ea_temp.shape[1]))
target_values_to_stack.append(np.hstack((A_temp,N_temp,Ea_temp)))
shape_s = S_matrix.shape
S_target_values = []
for i,row in enumerate(target_values_to_stack):
if target_reactions[i] in master_equation_reaction_list:
zero_to_append_infront = np.zeros((1,((number_of_reactions_in_cti-len(master_equation_reaction_list))*3)))
zero_to_append_behind = np.zeros((1, shape_s[1] - ((number_of_reactions_in_cti-len(master_equation_reaction_list))*3) - np.shape(row)[1] ))
temp_array = np.hstack((zero_to_append_infront,row,zero_to_append_behind))
S_target_values.append(temp_array)
else:
zero_to_append_behind = np.zeros((1,shape_s[1]-np.shape(row)[1]))
temp_array = np.hstack((row,zero_to_append_behind))
S_target_values.append(temp_array)
S_target_values = np.vstack((S_target_values))
return S_target_values
################################################################################
def calculate_six_parameter_fit(reaction,dictonary,temperature):
#evaluate the Ea, c, d, and f contributions separately before assembling k
A = dictonary[reaction]['A']
n = dictonary[reaction]['n']
Ea_temp = dictonary[reaction]['Ea']/(1.987*temperature)
c_temp = dictonary[reaction]['c']/((1.987*temperature)**3)
d_temp = dictonary[reaction]['d']*(1.987*temperature)
f_temp = dictonary[reaction]['f']* ((1.987*temperature)**3)
k = A*(temperature**n)*np.exp(-Ea_temp-c_temp-d_temp-f_temp)
return k
def sort_rate_constant_target_values(parsed_csv,unique_reactions,gas):
reaction_list_from_mechanism = gas.reaction_equations()
target_value_ks = [[] for reaction in range(len(unique_reactions))]
target_value_temps = [[] for reaction in range(len(unique_reactions))]
for i,reaction in enumerate(parsed_csv['Reaction']):
idx = reaction_list_from_mechanism.index(reaction)
target_value_ks[unique_reactions.index(idx)].append(parsed_csv['k'][i])
target_value_temps[unique_reactions.index(idx)].append(parsed_csv['temperature'][i])
return target_value_temps,target_value_ks
def rate_constant_over_temperature_range_from_cantera(reaction_number,
gas,
initial_temperature=250,
final_temperature=2500,
pressure=1,
conditions = {'H2':2,'O2':1,'N2':4},
dictonary={},
master_equation_reactions=[]):
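#returns temperature and rate-constant arrays over the requested range; master-equation reactions use the six-parameter fit, everything else uses Cantera's forward rate constants (scaled by a factor of 1000 for unit conversion)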
Temp = []
k = []
reaction_string = gas.reaction_equations()[reaction_number]
for temperature in np.arange(initial_temperature,final_temperature,1):
if reaction_string in master_equation_reactions:
k.append(calculate_six_parameter_fit(reaction_string,dictonary,temperature))
Temp.append(temperature)
else:
gas.TPX = temperature,pressure*101325,conditions
Temp.append(temperature)
k.append(gas.forward_rate_constants[reaction_number]*1000)
return Temp,k
def calculate_sigmas_for_rate_constants(k_target_value_S_matrix,k_target_values_parsed_csv,unique_reactions,gas,covarience):
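#propagates the covariance matrix through each target-value S row, sigma_k = sqrt(S_row * C * S_row^T), and groups the resulting sigmas by reaction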
reaction_list_from_mechanism = gas.reaction_equations()
sigma_list_for_target_ks = [[] for reaction in range(len(unique_reactions))]
shape = k_target_value_S_matrix.shape
for row in range(shape[0]):
#print(row)
SC = np.dot(k_target_value_S_matrix[row,:],covarience)
sigma_k = np.dot(SC,np.transpose(k_target_value_S_matrix[row,:]))
sigma_k = np.sqrt(sigma_k)
#print(row)
#print(k_target_values_parsed_csv['Reaction'][row])
indx = reaction_list_from_mechanism.index(k_target_values_parsed_csv['Reaction'][row])
sigma_list_for_target_ks[unique_reactions.index(indx)].append(sigma_k)
return sigma_list_for_target_ks
def calculating_target_value_ks_from_cantera_for_sigmas(k_target_values_parsed_csv,gas,unique_reactions,six_parameter_fit_dictonary,master_equation_reactions):
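#evaluates k at every target temperature/pressure: six-parameter fit for master-equation reactions, Cantera at the target conditions otherwise, grouped by reaction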
target_value_ks = [[] for reaction in range(len(unique_reactions))]
target_reactions = k_target_values_parsed_csv['Reaction']
target_temp = k_target_values_parsed_csv['temperature']
target_press = k_target_values_parsed_csv['pressure']
reactions_in_cti_file = gas.reaction_equations()
#print(reactions_in_cti_file)
for i,reaction in enumerate(target_reactions):
if reaction in master_equation_reactions:
k = calculate_six_parameter_fit(reaction,six_parameter_fit_dictonary,target_temp[i])
indx = reactions_in_cti_file.index(reaction)
target_value_ks[unique_reactions.index(indx)].append(k)
else:
if target_press[i] == 0:
pressure = 1e-9
else:
pressure = target_press[i]
gas.TPX = target_temp[i],pressure*101325,{'H2O2':0.003094,'O2':0.000556,'H2O':0.001113,'Ar':0.995237}
reaction_number_in_cti = reactions_in_cti_file.index(reaction)
k = gas.forward_rate_constants[reaction_number_in_cti]
indx = reactions_in_cti_file.index(reaction)
target_value_ks[unique_reactions.index(indx)].append(k*1000)
return target_value_ks
if bool(self.target_value_rate_constant_csv) and self.k_target_values=='On':
### build S-matrix rows for the extra rate-constant target csv; the original target csv is still the one plotted as data points
S_matrix_k_target_values_extra = target_values_for_S_six_parameter_fit(self.target_value_rate_constant_csv_extra_values,
self.exp_dict_list_optimized,
self.S_matrix,
master_equation_reaction_list = master_equation_reactions,
six_parameter_fit_sensitivity_dict = six_parameter_fit_sensitivity_dict)
#make two unique
unique_reactions_optimized=[]
unique_reactions_original = []
reaction_list_from_mechanism_original = gas_original.reaction_equations()
reaction_list_from_mechanism = gas_optimized.reaction_equations()
k_target_value_csv_extra = pd.read_csv(self.target_value_rate_constant_csv_extra_values)
k_target_value_csv = pd.read_csv(self.target_value_rate_constant_csv)
for row in range(k_target_value_csv_extra.shape[0]):
unique_reactions_optimized.append(reaction_list_from_mechanism.index(k_target_value_csv_extra['Reaction'][row]))
unique_reactions_original.append(reaction_list_from_mechanism_original.index(k_target_value_csv_extra['Reaction'][row]))
unique_reactions_optimized = unique_list(unique_reactions_optimized)
unique_reactions_original = unique_list(unique_reactions_original)
sigma_list_for_target_ks_optimized = calculate_sigmas_for_rate_constants(S_matrix_k_target_values_extra,k_target_value_csv_extra,unique_reactions_optimized,gas_optimized,self.covarience)
self.sigma_list_for_target_ks_optimized = sigma_list_for_target_ks_optimized
sigma_list_for_target_ks_original = calculate_sigmas_for_rate_constants(S_matrix_k_target_values_extra,k_target_value_csv_extra,unique_reactions_original,gas_original,self.original_covariance)
self.sigma_list_for_target_ks_original = sigma_list_for_target_ks_original
######################
target_value_temps_optimized,target_value_ks_optimized = sort_rate_constant_target_values(k_target_value_csv_extra,unique_reactions_optimized,gas_optimized)
target_value_temps_original,target_value_ks_original = sort_rate_constant_target_values(k_target_value_csv_extra,unique_reactions_original,gas_original)
#############################################
unique_reactions_optimized_for_plotting=[]
unique_reactions_original_for_plotting = []
for row in range(k_target_value_csv.shape[0]):
unique_reactions_optimized_for_plotting.append(reaction_list_from_mechanism.index(k_target_value_csv['Reaction'][row]))
unique_reactions_original_for_plotting.append(reaction_list_from_mechanism_original.index(k_target_value_csv['Reaction'][row]))
unique_reactions_optimized_for_plotting = unique_list(unique_reactions_optimized)
unique_reactions_original_for_plotting = unique_list(unique_reactions_original)
target_value_temps_optimized_for_plotting,target_value_ks_optimized_for_plotting = sort_rate_constant_target_values(k_target_value_csv,unique_reactions_optimized_for_plotting,gas_optimized)
target_value_temps_original_for_plotting,target_value_ks_original_for_plotting = sort_rate_constant_target_values(k_target_value_csv,unique_reactions_original_for_plotting,gas_original)
#############################################
target_value_ks_calculated_with_cantera_optimized = calculating_target_value_ks_from_cantera_for_sigmas(k_target_value_csv_extra,gas_optimized,unique_reactions_optimized,six_parameter_fit_dict_optimized,master_equation_reactions)
target_value_ks_calculated_with_cantera_original = calculating_target_value_ks_from_cantera_for_sigmas(k_target_value_csv_extra,gas_original,unique_reactions_original,six_parameter_fit_dict_nominal,master_equation_reactions)
#print(target_value_ks_calculated_with_cantera_original)
for i,reaction in enumerate(unique_reactions_optimized):
plt.figure()
Temp_optimized,k_optimized = rate_constant_over_temperature_range_from_cantera(reaction,
gas_optimized,
initial_temperature=250,
final_temperature=2500,
pressure=1.635,
conditions={'H2O2':0.003094,'O2':0.000556,'H2O':0.001113,'Ar':0.995237},
dictonary = six_parameter_fit_dict_optimized,
master_equation_reactions = master_equation_reactions)
plt.semilogy(Temp_optimized,k_optimized,'b')
#calculate sigmas
#print(sigma_list_for_target_ks_optimized[i])
high_error_optimized = np.exp(np.array(sigma_list_for_target_ks_optimized[i]))
#print(high_error_optimized)
high_error_optimized = np.multiply(high_error_optimized,target_value_ks_calculated_with_cantera_optimized[i])
low_error_optimized = np.exp(np.array(sigma_list_for_target_ks_optimized[i])*-1)
low_error_optimized = np.multiply(low_error_optimized,target_value_ks_calculated_with_cantera_optimized[i])
# plt.semilogy(target_value_temps_optimized[i],high_error_optimized,'b--')
a, b = zip(*sorted(zip(target_value_temps_optimized[i],high_error_optimized)))
#plt.scatter(a,b,color='blue')
plt.semilogy(a,b,'b--')
a, b = zip(*sorted(zip(target_value_temps_optimized[i],low_error_optimized)))
plt.semilogy(a,b,'b--')
#plt.scatter(a,b,color='blue')
# print(a,b)
Temp_original,k_original = rate_constant_over_temperature_range_from_cantera(reaction_list_from_mechanism_original.index(reaction_list_from_mechanism[reaction]),
gas_original,
initial_temperature=250,
final_temperature=2500,
pressure=1.635,
conditions={'H2O2':0.003094,'O2':0.000556,'H2O':0.001113,'Ar':0.995237},
dictonary = six_parameter_fit_dict_nominal,
master_equation_reactions = master_equation_reactions)
plt.semilogy(Temp_original,k_original,'r')
# plt.xlim((0,3000))
#plt.ylim((10**9,10**15))
#print(unique_reactions_original)
# print(reaction_list_from_mechanism_original.index(reaction_list_from_mechanism[reaction]))
#print(unique_reactions_original.index(reaction_list_from_mechanism_original.index(reaction_list_from_mechanism[reaction])))
high_error_original = np.exp(sigma_list_for_target_ks_original[unique_reactions_original.index(reaction_list_from_mechanism_original.index(reaction_list_from_mechanism[reaction]))])
high_error_original = np.multiply(high_error_original,target_value_ks_calculated_with_cantera_original[unique_reactions_original.index(reaction_list_from_mechanism_original.index(reaction_list_from_mechanism[reaction]))])
low_error_original = np.exp(np.array(sigma_list_for_target_ks_original[unique_reactions_original.index(reaction_list_from_mechanism_original.index(reaction_list_from_mechanism[reaction]))])*-1)
low_error_original = np.multiply(low_error_original,target_value_ks_calculated_with_cantera_original[unique_reactions_original.index(reaction_list_from_mechanism_original.index(reaction_list_from_mechanism[reaction]))])
a, b = zip(*sorted(zip(target_value_temps_original[unique_reactions_original.index(reaction_list_from_mechanism_original.index(reaction_list_from_mechanism[reaction]))],high_error_original)))
plt.semilogy(a,b,'r--')
#plt.scatter(a,b,color='red')
a, b = zip(*sorted(zip(target_value_temps_original[unique_reactions_original.index(reaction_list_from_mechanism_original.index(reaction_list_from_mechanism[reaction]))],low_error_original)))
plt.semilogy(a,b,'r--')
#plt.scatter(a,b,color='red')
plt.semilogy(target_value_temps_optimized_for_plotting[i],target_value_ks_optimized_for_plotting[i],'o',color='black')
plt.xlabel('Temperature [K]')
#plt.ylabel('Kmol/m^3-s')
plt.ylabel(r'k [$\frac{cm^3}{{mol s}}$]')
plt.title(reaction_list_from_mechanism[reaction])
plt.tick_params(axis ='both', direction ='in')
plt.tick_params(axis ='both', direction ='in',which='minor')
plt.savefig(os.path.join(self.working_directory,reaction_list_from_mechanism[reaction]+'.pdf'), bbox_inches='tight')
plt.savefig(os.path.join(self.working_directory,reaction_list_from_mechanism[reaction]+'.svg'), bbox_inches='tight')
elif bool(self.target_value_rate_constant_csv) and self.k_target_values=='Off':
unique_reactions_optimized=[]
unique_reactions_original = []
reaction_list_from_mechanism_original = gas_original.reaction_equations()
reaction_list_from_mechanism = gas_optimized.reaction_equations()
k_target_value_csv = pd.read_csv(self.target_value_rate_constant_csv)
for row in range(k_target_value_csv.shape[0]):
unique_reactions_optimized.append(reaction_list_from_mechanism.index(k_target_value_csv['Reaction'][row]))
unique_reactions_original.append(reaction_list_from_mechanism_original.index(k_target_value_csv['Reaction'][row]))
unique_reactions_optimized = unique_list(unique_reactions_optimized)
unique_reactions_original = unique_list(unique_reactions_original)
######################
target_value_temps_optimized,target_value_ks_optimized = sort_rate_constant_target_values(k_target_value_csv,unique_reactions_optimized,gas_optimized)
target_value_temps_original,target_value_ks_original = sort_rate_constant_target_values(k_target_value_csv,unique_reactions_original,gas_original)
#############################################
target_value_ks_calculated_with_cantera_optimized = calculating_target_value_ks_from_cantera_for_sigmas(k_target_value_csv,gas_optimized,unique_reactions_optimized,six_parameter_fit_dict_optimized,master_equation_reactions)
target_value_ks_calculated_with_cantera_original = calculating_target_value_ks_from_cantera_for_sigmas(k_target_value_csv,gas_original,unique_reactions_original,six_parameter_fit_dict_nominal,master_equation_reactions)
for i,reaction in enumerate(unique_reactions_optimized):
plt.figure()
Temp_optimized,k_optimized = rate_constant_over_temperature_range_from_cantera(reaction,
gas_optimized,
initial_temperature=250,
final_temperature=2500,
pressure=1,
conditions={'H2':2,'O2':1,'Ar':4})
plt.semilogy(Temp_optimized,k_optimized,'b')
Temp_original,k_original = rate_constant_over_temperature_range_from_cantera(reaction_list_from_mechanism_original.index(reaction_list_from_mechanism[reaction]),
gas_original,
initial_temperature=250,
final_temperature=2500,
pressure=1,
conditions={'H2':2,'O2':1,'Ar':4})
plt.semilogy(Temp_original,k_original,'r')
plt.semilogy(target_value_temps_optimized[i],target_value_ks_optimized[i],'o',color='black')
plt.xlabel('Temperature (K)')
plt.ylabel('Kmol/m^3-s')
plt.title(reaction_list_from_mechanism[reaction])
return S_matrix_k_target_values_extra
def plotting_normal_distributions(self,
paramter_list,
optimized_cti_file='',
pdf_distribution_file='',
shock_tube_instance=None):
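#plots a normal pdf for each listed parameter using the posterior variance (A is plotted as ln(A*1000) and Ea is converted out of Cantera units) and overlays an externally supplied pdf if a csv is given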
all_parameters = shock_tube_instance.posterior_diag_df['parameter'].tolist()
df = shock_tube_instance.posterior_diag_df
gas_optimized = ct.Solution(optimized_cti_file)
for parameter in paramter_list:
indx = all_parameters.index(parameter)
variance = df['value'][indx]
if parameter[0]=='A' or parameter[0]=='n' or parameter[0]=='E':
letter,number = parameter.split('_')
number = int(number)
if 'ElementaryReaction' in str(type(gas_optimized.reaction(number))):
A=gas_optimized.reaction(number).rate.pre_exponential_factor
n=gas_optimized.reaction(number).rate.temperature_exponent
Ea=gas_optimized.reaction(number).rate.activation_energy
if 'FalloffReaction' in str(type(gas_optimized.reaction(number))):
A=gas_optimized.reaction(number).high_rate.pre_exponential_factor
n=gas_optimized.reaction(number).high_rate.temperature_exponent
Ea=gas_optimized.reaction(number).high_rate.activation_energy
if 'ThreeBodyReaction' in str(type(gas_optimized.reaction(number))):
A=gas_optimized.reaction(number).rate.pre_exponential_factor
n=gas_optimized.reaction(number).rate.temperature_exponent
Ea=gas_optimized.reaction(number).rate.activation_energy
else:
letter = None
if letter =='A':
mu = np.log(A*1000)
sigma = math.sqrt(variance)
elif letter == 'n':
mu = n
sigma = math.sqrt(variance)
#sigma = sigma/2
elif letter == 'Ea':
mu=Ea/1000/4.184
sigma = math.sqrt(variance)
sigma = sigma*ct.gas_constant/(1000*4.184)
#sigma = sigma/2
else:
mu= 0
sigma = math.sqrt(variance)
x = np.linspace(mu - 3*sigma, mu + 3*sigma, 100)
plt.figure()
plt.plot(x, stats.norm.pdf(x, mu, sigma))
plt.xlabel(parameter)
plt.ylabel('pdf')
plt.savefig(self.working_directory+'/'+parameter+'_distribution'+'_.pdf',bbox_inches='tight')
if bool(pdf_distribution_file):
df2 = pd.read_csv(pdf_distribution_file)
#temp = np.log(np.exp(df2[parameter].values)/9.33e13)
#plt.plot(temp,df2['pdf_'+parameter])
plt.plot(df2[parameter],df2['pdf_'+parameter])
plt.savefig(self.working_directory+'/'+parameter+'_distribution'+'_.pdf',bbox_inches='tight')
def plotting_joint_normal_distributions(self,
coupled_parameters,
optimized_cti_file='',
joint_data_csv=''):
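#builds a bivariate normal from the posterior variances and covariance of each parameter pair, plots the 3-D surface and its contours, and overlays reference values and sampled data if provided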
all_parameters = self.shock_tube_instance.posterior_diag_df['parameter'].tolist()
df = self.shock_tube_instance.posterior_diag_df
gas_optimized = ct.Solution(optimized_cti_file)
for couple in coupled_parameters:
indx1 = all_parameters.index(couple[0])
indx2 = all_parameters.index(couple[1])
variance1 = df['value'][indx1]
variance2 = df['value'][indx2]
if couple[0][0]=='A' or couple[0][0]=='n' or couple[0][0]=='E':
letter1,number1 = couple[0].split('_')
number1 = int(number1)
number1_covariance = number1
if letter1=='n':
number1_covariance = number1+len(gas_optimized.reaction_equations())
if letter1=='Ea':
number1_covariance = number1+len(gas_optimized.reaction_equations())*2
if 'ElementaryReaction' in str(type(gas_optimized.reaction(number1))):
A1=gas_optimized.reaction(number1).rate.pre_exponential_factor
n1=gas_optimized.reaction(number1).rate.temperature_exponent
Ea1=gas_optimized.reaction(number1).rate.activation_energy
if 'FalloffReaction' in str(type(gas_optimized.reaction(number1))):
A1=gas_optimized.reaction(number1).high_rate.pre_exponential_factor
n1=gas_optimized.reaction(number1).high_rate.temperature_exponent
Ea1=gas_optimized.reaction(number1).high_rate.activation_energy
if 'ThreeBodyReaction' in str(type(gas_optimized.reaction(number1))):
A1=gas_optimized.reaction(number1).rate.pre_exponential_factor
n1=gas_optimized.reaction(number1).rate.temperature_exponent
Ea1=gas_optimized.reaction(number1).rate.activation_energy
else:
letter1 = None
mu1=0
mu_x=0
sigma1= math.sqrt(variance1)
number1_covariance = indx1
variance_x = variance1
if couple[1][0]=='A' or couple[1][0]=='n' or couple[1][0]=='E':
letter2,number2 = couple[1].split('_')
number2 = int(number2)
number2_covariance = number2
if letter2=='n':
number2_covariance = number2+len(gas_optimized.reaction_equations())
if letter2 == 'Ea':
number2_covariance = number2+len(gas_optimized.reaction_equations())*2
if 'ElementaryReaction' in str(type(gas_optimized.reaction(number2))):
A2=gas_optimized.reaction(number2).rate.pre_exponential_factor
n2=gas_optimized.reaction(number2).rate.temperature_exponent
Ea2=gas_optimized.reaction(number2).rate.activation_energy
if 'FalloffReaction' in str(type(gas_optimized.reaction(number2))):
A2=gas_optimized.reaction(number2).high_rate.pre_exponential_factor
n2=gas_optimized.reaction(number2).high_rate.temperature_exponent
Ea2=gas_optimized.reaction(number2).high_rate.activation_energy
if 'ThreeBodyReaction' in str(type(gas_optimized.reaction(number2))):
A2=gas_optimized.reaction(number2).rate.pre_exponential_factor
n2=gas_optimized.reaction(number2).rate.temperature_exponent
Ea2=gas_optimized.reaction(number2).rate.activation_energy
else:
mu_y=0
mu2=0
letter2=None
variance_y = variance2
sigma = math.sqrt(variance2)
number2_covariance = indx2
covariance_couple = self.covarience[number1_covariance,number2_covariance]
# print(number1_covariance,number2_covariance)
#covariance_couple = .00760122
if letter1 =='A':
mu1 = np.log(A1*1000)
mu_x = mu1
variance_x = variance1
sigma = np.sqrt(variance_x)
#sigma = np.exp(sigma)
#sigma = sigma*1000
#sigma = np.log(sigma)
#sigma = sigma/2
variance_x = sigma**2
#convert to chemkin units
if letter1 == 'n':
mu1 = n1
mu_x = mu1
variance_x = variance1
sigma = np.sqrt(variance_x)
#sigma = sigma/2
variance_x = sigma**2
if letter1 == 'Ea':
mu1=Ea1/1000/4.184
mu_x = mu1
variance_x = variance1
sigma = math.sqrt(variance_x)
sigma = sigma*ct.gas_constant/(1000*4.184)
#sigma = sigma/2
variance_x = sigma**2
if letter2 =='A':
mu2 = np.log(A2*1000)
mu_y = mu2
variance_y = variance2
sigma = np.sqrt(variance_y)
#sigma = np.exp(sigma)
#sigma = sigma*1000
#sigma = np.log(sigma)
#sigma = sigma/2
variance_y = sigma**2
#convert to chemkin units
if letter2 == 'n':
mu2 = n2
mu_y = mu2
variance_y = variance2
sigma = np.sqrt(variance_y)
#sigma = sigma/2
variance_y = sigma**2
if letter2 == 'Ea':
mu2 = Ea2/1000/4.184
mu_y = mu2
variance_y = variance2
sigma = math.sqrt(variance_y)
sigma = sigma*ct.gas_constant/(1000*4.184)
#sigma = sigma/2
variance_y = sigma**2
if letter2 =='Ea' or letter1 == 'Ea':
covariance_couple = covariance_couple*ct.gas_constant/(1000*4.184)
if letter2=='Ea' and letter1=='Ea':
covariance_couple = np.sqrt(covariance_couple)
covariance_couple = covariance_couple*ct.gas_constant/(1000*4.184)
covariance_couple = covariance_couple**2
#if letter1=='A' or letter2=='A':
#covariance_couple = np.exp(covariance_couple)
#covariance_couple = covariance_couple/2
#covariance_couple = np.log(covariance_couple)
x = np.linspace(mu1 - 3*np.sqrt(variance_x), mu1 + 3*np.sqrt(variance_x),1000)
y = np.linspace(mu2 - 3*np.sqrt(variance_y), mu2 + 3*np.sqrt(variance_y),1000)
#x = np.linspace(mu1 - 2*np.sqrt(variance_x), mu1 + 2*np.sqrt(variance_x),1000)
#y = np.linspace(mu2 - 2*np.sqrt(variance_y), mu2 + 2*np.sqrt(variance_y),1000)
#TEST
X,Y = np.meshgrid(x,y)
#X, Y = np.meshgrid(x,y)
pos = np.empty(X.shape + (2,))
pos[:, :, 0] = X; pos[:, :, 1] = Y
rv = multivariate_normal([mu_x, mu_y], [[variance_x, covariance_couple], [covariance_couple, variance_y]])
print(couple,[mu_x, mu_y], [[variance_x, covariance_couple], [covariance_couple, variance_y]])
fig = plt.figure()
ax = fig.add_subplot(projection='3d')
ax.plot_surface(X, Y, rv.pdf(pos),cmap='viridis',linewidth=0)
ax.set_xlabel(couple[0])
ax.set_ylabel(couple[1])
ax.set_zlabel('Z axis')
plt.show()
additional_dictionary = {'A_5':{'reaction':'H2O2 + M = 2OH + M','our_value':np.log(4.99999e8),'hong_value':np.log(5.60e8)},
'A_6':{'reaction':'OH + H2O2 = H2O + HO2','our_value':np.log(5624842396127.52),'hong_value':np.log(6.93e12)},
'A_7':{'reaction': 'OH + HO2 = H2O + O2' , 'our_value':np.log(16646221572429.6),'hong_value':np.log(1.82e13)},
'A_8':{'reaction':'2HO2 = H2O2 + O2','our_value':np.log(806831822530.157),'hong_value':np.log(3.17e12)},
'A_11':{'reaction':'2OH = H2O + O','our_value':np.log(1730749579423.63),'hong_value':np.log(2.355e12)},
'Sigma_1':{'reaction':'sigma H2O2','our_value':-.03846,'hong_value':0},
'Sigma_2':{'reaction':'sigma_HO2','our_value':.0721,'hong_value':0}}
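#NOTE: the dictionary defined below overrides the one above, so the updated values are the ones actually used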
additional_dictionary = {'A_5':{'reaction':'H2O2 + M = 2OH + M','our_value':np.log(4.99999e8),'hong_value':np.log(5.60e8)},
'A_6':{'reaction':'OH + H2O2 = H2O + HO2','our_value':np.log(5917630773605.197),'hong_value':np.log(6.93e12)},
'A_7':{'reaction': 'OH + HO2 = H2O + O2' , 'our_value':np.log(18236369573049.9),'hong_value':np.log(1.82e13)},
'A_8':{'reaction':'2HO2 = H2O2 + O2','our_value':np.log(863643827140.3533),'hong_value':np.log(3.17e12)},
'A_11':{'reaction':'2OH = H2O + O','our_value':np.log(1734217478483.0261),'hong_value':np.log(2.355e12)},
'Sigma_1':{'reaction':'sigma H2O2','our_value':-.03846,'hong_value':0},
'Sigma_2':{'reaction':'sigma_HO2','our_value':.0721,'hong_value':0}}
error_dictonary = {'A_5':{'reaction':'H2O2 + M = 2OH + M','our_value':None,'hong_value':0},
'A_6':{'reaction':'OH + H2O2 = H2O + HO2','our_value':np.log(5624842396127.52),'hong_value':0},
'A_7':{'reaction': 'OH + HO2 = H2O + O2' , 'our_value':np.log(16646221572429.6),'hong_value':0},
'A_8':{'reaction':'2HO2 = H2O2 + O2','our_value':np.log(806831822530.157),'hong_value':0},
'A_11':{'reaction':'2OH = H2O + O','our_value':np.log(1730749579423.63),'hong_value':0},
'Sigma_1':{'reaction':'sigma H2O2','our_value':-.03846,'hong_value':0},
'Sigma_2':{'reaction':'sigma_HO2','our_value':.0721,'hong_value':0}}
Z = rv.pdf(pos)
plt.figure()
levels = [.65,.95,.99]
#contour = plt.contour(X, Y, Z, levels, colors='k')
#plt.clabel(contour, colors = 'k', fmt = '%2.1f', fontsize=12)
# plt.colorbar(contour_filled)
plt.contour(X,Y,Z)
plt.xlabel(couple[0])
plt.ylabel(couple[1])
if bool(additional_dictionary):
plt.xlabel(additional_dictionary[couple[0]]['reaction'])
plt.ylabel(additional_dictionary[couple[1]]['reaction'])
x_error = (additional_dictionary[couple[0]]['hong_value'])*(error_dictonary[couple[0]]['hong_value'])
print(x_error,'this is the x error')
y_error = (additional_dictionary[couple[1]]['hong_value'])*(error_dictonary[couple[1]]['hong_value'])
print(y_error,'this is the y error')
plt.errorbar(additional_dictionary[couple[0]]['hong_value'],additional_dictionary[couple[1]]['hong_value'],xerr=x_error,yerr=y_error)
plt.scatter(additional_dictionary[couple[0]]['hong_value'],additional_dictionary[couple[1]]['hong_value'],zorder=4,label='Hong Values From Table')
plt.scatter(additional_dictionary[couple[0]]['our_value'],additional_dictionary[couple[1]]['our_value'],zorder=4,marker='x',label='MSI Values')
plt.legend()
if bool(joint_data_csv):
df2 = pd.read_csv(joint_data_csv)
#plt.figure()
plt.scatter(df2[couple[0]], df2[couple[1]])
plt.savefig(self.working_directory+'/'+couple[0]+'_'+couple[1]+'_distribution'+'_.pdf',bbox_inches='tight')
def plotting_physical_model_parameter_distributions(self,
paramter_list,
shock_tube_instance,
optimized_X,
original_experimental_conditions,
T_uncertainty=.005,
P_uncertainty=.01,
X_uncertainty=.025,
directory_to_save_images='',
experiments_want_to_plot_data_from=[]):
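#histograms the optimized physical-model parameter updates (temperature, pressure, time, and species mole fractions) across the selected experiments, fits a normal to each histogram, and overlays the prior distribution in red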
if not experiments_want_to_plot_data_from:
experiments_want_to_plot_data_from = np.arange(0,len(self.exp_dict_list_optimized))
try:
all_parameters = shock_tube_instance.posterior_diag_df['parameter'].tolist()
except:
all_parameters = shock_tube_instance.prior_diag_df['parameter'].tolist()
parameter_groups = ['T','P','Time']
#print(all_parameters)
list_of_species = []
for parameter in all_parameters:
if parameter[0] == 'X':
list_of_species.append(parameter.split('_')[1])
output = []
for x in list_of_species:
if x not in output:
output.append(x)
parameter_groups = parameter_groups + output
for parameter in parameter_groups:
temp_list = []
parameter_counter = 0
for i,p in enumerate(all_parameters):
if parameter == 'T':
if p[0] == 'T' and p[1] != 'i':
yaml_file = int(p.split('_')[2])
if parameter_counter in experiments_want_to_plot_data_from:
temp_list.append(optimized_X[i][0])
prior_sigma=T_uncertainty
parameter_counter+=1
elif parameter == 'Time':
if p[0] == 'T' and p[1] == 'i':
yaml_file = int(p.split('_')[3])
if parameter_counter in experiments_want_to_plot_data_from:
temp_list.append(optimized_X[i][0])
prior_sigma=T_uncertainty
parameter_counter+=1
elif parameter == 'P':
if p[0] == 'P':
yaml_file = int(p.split('_')[2])
pressure_original = original_experimental_conditions[yaml_file]['pressure']
#temp_list.append(temp_original*np.exp(optimized_X[i]) - temp_original)
if parameter_counter in experiments_want_to_plot_data_from:
temp_list.append(optimized_X[i][0])
prior_sigma = P_uncertainty
parameter_counter+=1
elif parameter =='H2O':
if p[0] == 'X' and p[2:5] == 'H2O' and p[5]== '_':
yaml_file = int(p.split('_')[3])
specie_original = original_experimental_conditions[yaml_file]['conditions']['H2O']
if parameter_counter in experiments_want_to_plot_data_from:
temp_list.append(optimized_X[i][0])
prior_sigma=X_uncertainty
parameter_counter+=1
elif parameter =='H2O2':
if p[0] == 'X' and p[2:6] == 'H2O2' and p[6]== '_':
yaml_file = int(p.split('_')[3])
specie_original = original_experimental_conditions[yaml_file]['conditions']['H2O2']
if parameter_counter in experiments_want_to_plot_data_from:
temp_list.append(optimized_X[i][0])
prior_sigma=X_uncertainty
parameter_counter+=1
elif parameter =='O2':
if p[0] == 'X' and p[2:4] == 'O2' and p[4]== '_':
yaml_file = int(p.split('_')[3])
specie_original = original_experimental_conditions[yaml_file]['conditions']['O2']
if parameter_counter in experiments_want_to_plot_data_from:
temp_list.append(optimized_X[i][0])
prior_sigma=X_uncertainty
parameter_counter+=1
elif parameter =='H':
if p[0] == 'X' and p[2:3] == 'H' and p[3]== '_':
yaml_file = int(p.split('_')[3])
specie_original = original_experimental_conditions[yaml_file]['conditions']['H']
if parameter_counter in experiments_want_to_plot_data_from:
temp_list.append(optimized_X[i][0])
prior_sigma=X_uncertainty
parameter_counter+=1
elif parameter =='CH4':
if p[0] == 'X' and p[2:5] == 'CH4' and p[5]== '_':
yaml_file = int(p.split('_')[3])
specie_original = original_experimental_conditions[yaml_file]['conditions']['CH4']
if parameter_counter in experiments_want_to_plot_data_from:
temp_list.append(optimized_X[i][0])
prior_sigma=X_uncertainty
parameter_counter+=1
else:
parameter_counter+=1
plt.figure()
mu2=0
sigma2=prior_sigma
n, bins, patches=plt.hist(temp_list,bins='auto',density=True,color='g')
(mu, sigma) = norm.fit(temp_list)
y = norm.pdf(bins, mu, sigma)
x = np.linspace(mu - 3*sigma, mu + 3*sigma, 100)
l = plt.plot(bins, y, 'b--', linewidth=2)
plt.plot(x, stats.norm.pdf(x, mu, sigma),'b')
x2 = np.linspace(mu2 - 3*sigma2, mu2 + 3*sigma2, 100)
plt.plot(x2, stats.norm.pdf(x2, mu2, sigma2),'r')
#plot
plt.xlabel(parameter)
#plt.ylabel('Probability')
plt.title(r'$\mathrm{Histogram\ of\ physical\ model\ parameter:}\ \mu=%.3f,\ \sigma=%.3f$' %(mu, sigma))
plt.grid(True)
#plt.savefig(directory_to_save_images+'/'+'Including Experiments_'+ str(experiments_want_to_plot_data_from)+parameter+'_.pdf',dpi=1000,bbox_inches='tight')
def difference_plotter(self,
paramter_list,
optimized_cti_file='',
pdf_distribution_file=''):
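#compares the posterior normal pdf of each parameter with an externally supplied pdf and plots the percent difference in both x and y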
all_parameters = self.shock_tube_instance.posterior_diag_df['parameter'].tolist()
df = self.shock_tube_instance.posterior_diag_df
gas_optimized = ct.Solution(optimized_cti_file)
for parameter in paramter_list:
indx = all_parameters.index(parameter)
variance = df['value'][indx]
letter,number = parameter.split('_')
number = int(number)
A=gas_optimized.reaction(number).rate.pre_exponential_factor
n=gas_optimized.reaction(number).rate.temperature_exponent
Ea=gas_optimized.reaction(number).rate.activation_energy
if letter =='A':
mu = np.log(A*1000)
sigma = math.sqrt(variance)
if letter == 'n':
mu = n
sigma = math.sqrt(variance)
#sigma = sigma/2
if letter == 'Ea':
mu=Ea/1000/4.184
sigma = math.sqrt(variance)
sigma = sigma*ct.gas_constant/(1000*4.184)
#sigma = sigma/2
x = np.linspace(mu - 6*sigma, mu + 6*sigma, 100)
#plt.figure()
#plt.plot(x, stats.norm.pdf(x, mu, sigma))
# plt.xlabel(parameter)
# plt.ylabel('pdf')
# plt.savefig(self.working_directory+'/'+parameter+'_distribution'+'_.pdf',bbox_inches='tight')
if bool(pdf_distribution_file):
df2 = pd.read_csv(pdf_distribution_file)
#temp = np.log(np.exp(df2[parameter].values)/9.33e13)
#plt.plot(temp,df2['pdf_'+parameter])
interp_y = np.interp(df2[parameter],x,stats.norm.pdf(x, mu, sigma))
plt.figure()
plt.plot(df2[parameter],interp_y)
plt.plot(df2[parameter],df2['pdf_'+parameter])
interp_x = np.interp(df2['pdf_'+parameter],stats.norm.pdf(x,mu,sigma),x)
y_shift = np.divide((df2['pdf_'+parameter] - interp_y),df2['pdf_'+parameter])
x_shift = np.divide((df2[parameter] - interp_x),df2[parameter])
plt.figure()
plt.title('Percent Difference In Y')
plt.plot(y_shift)
plt.xlabel(parameter)
plt.figure()
plt.plot(x_shift)
plt.title('Percent Difference In X')
plt.xlabel(parameter)
def plotting_histograms_of_MSI_simulations(self,experiments_want_to_plot_data_from=[],bins='auto',directory_to_save_images=''):
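#histograms the Y and y vectors (optionally restricted to a subset of experiments) in raw and normalized form and saves the resulting figures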
s_shape = self.S_matrix.shape[1]
if self.k_target_value_S_matrix.any():
target_values_for_s = self.k_target_value_S_matrix
s_shape = s_shape+target_values_for_s.shape[0]
y_shape = self.y_matrix.shape[0]
difference = y_shape-s_shape
y_values = self.y_matrix[0:difference,0]
Y_values = self.Y_matrix[0:difference,0]
self.lengths_of_experimental_data()
#plotting_Y Histagrams
if bool(experiments_want_to_plot_data_from):
y_values = []
Y_values = []
start = 0
stop = 0
for x in range(len(self.simulation_lengths_of_experimental_data)):
for y in range(len(self.simulation_lengths_of_experimental_data[x])):
stop = self.simulation_lengths_of_experimental_data[x][y] + start
if x in experiments_want_to_plot_data_from:
temp = self.Y_matrix[start:stop,:]
Y_values.append(temp)
temp2 = self.y_matrix[start:stop,:]
y_values.append(temp2)
start = start + self.simulation_lengths_of_experimental_data[x][y]
else:
start = start + self.simulation_lengths_of_experimental_data[x][y]
Y_values = np.vstack((Y_values))
y_values = np.vstack((y_values))
plt.figure()
plt.subplot(2,2,1)
n, bins2, patches = plt.hist(Y_values,bins=bins ,align='mid')
min_value = min(Y_values)
max_value=max(Y_values)
plt.xlim([min_value,max_value])
plt.xlabel('Y')
plt.suptitle('Including Experiments_'+ str(experiments_want_to_plot_data_from), fontsize=10)
plt.subplot(2,2,2)
plt.hist(y_values,bins=bins,align='mid')
plt.xlabel('y')
plt.subplot(2,2,3)
plt.hist(Y_values,bins=bins,density=True,align='mid')
plt.xlabel('Y')
plt.ylabel('normalized')
plt.subplot(2,2,4)
plt.hist(y_values,bins=bins,density=True,align='mid')
plt.xlabel('y')
plt.ylabel('normalized')
plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=.5, hspace=.5)
plt.savefig(directory_to_save_images+'/'+'Including Experiments_'+ str(experiments_want_to_plot_data_from)+'_Yy_hist_4.pdf',dpi=1000,bbox_inches='tight')
#plotting two fold plots
plt.figure()
plt.subplot(2,1,1)
plt.title('Including Experiments_'+ str(experiments_want_to_plot_data_from))
n, bins2, patches = plt.hist(Y_values,bins=bins ,align='mid')
plt.xlabel('Y')
#plt.xlim([-1,1])
plt.subplot(2,1,2)
plt.hist(y_values,bins=bins,align='mid')
plt.xlabel('y')
plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=.5, hspace=.5)
plt.savefig(directory_to_save_images+'/'+'Including Experiments_'+ str(experiments_want_to_plot_data_from)+'_Yy_hist_2.pdf',dpi=1000,bbox_inches='tight')
#plotting normalized values
plt.figure()
plt.subplot(2,1,1)
n, bins2, patches = plt.hist(Y_values,bins=bins ,align='mid',density=True)
plt.xlabel('Y')
plt.title('Including Experiments_'+ str(experiments_want_to_plot_data_from))
plt.ylabel('normalized')
plt.subplot(2,1,2)
plt.hist(y_values,bins=bins,align='mid',density=True)
plt.xlabel('y')
plt.ylabel('normalized')
plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=.5, hspace=.5)
plt.savefig(directory_to_save_images+'/'+'Including Experiments_'+ str(experiments_want_to_plot_data_from)+'_Yy_hist_2_normalized.pdf',dpi=1000,bbox_inches='tight')
else:
plt.figure()
plt.subplot(2,2,1)
min_value = min(Y_values)
max_value = max(Y_values)
n, bins2, patches = plt.hist(Y_values,bins=bins ,align='mid')
plt.xlim([min_value,max_value])
plt.xlabel('Y')
plt.suptitle("Including All Experiments", fontsize=10)
plt.subplot(2,2,2)
plt.hist(y_values,bins=bins,align='mid')
plt.xlabel('y')
plt.subplot(2,2,3)
plt.hist(Y_values,bins=bins,density=True,align='mid')
plt.xlabel('Y')
plt.ylabel('normalized')
plt.subplot(2,2,4)
plt.hist(y_values,bins=bins,density=True,align='mid')
plt.xlabel('y')
plt.ylabel('normalized')
plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=.5, hspace=.5)
plt.savefig(directory_to_save_images+'/'+'Including all Experiments'+'_Yy_hist_4.pdf',dpi=1000,bbox_inches='tight')
#plotting two fold plots
plt.figure()
plt.subplot(2,1,1)
min_value = np.min(Y_values)
max_value = np.max(Y_values)
plt.title('Including all Experiments')
n, bins2, patches = plt.hist(Y_values,bins=bins ,align='mid')
plt.xlabel('Y')
#plt.xlim([-1,1])
plt.subplot(2,1,2)
plt.hist(y_values,bins=bins,align='mid')
plt.xlabel('y')
plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=.5, hspace=.5)
plt.savefig(directory_to_save_images+'/'+'Including all Experiments'+'_Yy_hist_2.pdf',dpi=1000,bbox_inches='tight')
#plotting normalized values
plt.figure()
plt.subplot(2,1,1)
min_value = np.min(Y_values)
max_value = np.max(Y_values)
n, bins2, patches = plt.hist(Y_values,bins=bins ,align='mid',density=True)
plt.xlabel('Y')
plt.title('Including all Experiments')
plt.ylabel('normalized')
plt.subplot(2,1,2)
plt.hist(y_values,bins=bins,align='mid',density=True)
plt.xlabel('y')
plt.ylabel('normalized')
plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=.5, hspace=.5)
plt.savefig(directory_to_save_images+'/'+'Including all Experiments'+'_Yy_hist_2_normalized.pdf',dpi=1000,bbox_inches='tight')
def plotting_T_and_time_full_simulation(self,experiments_want_to_plot_data_from=[],directory_to_save_images=''):
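#scatter-plots the Y and y entries for the selected experiments against experimental time and against the initial simulation temperature (simulation temperature histories are interpolated onto the experimental time points along the way)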
init_temperature_list = []
for exp in self.exp_dict_list_original:
init_temperature_list.append(exp['simulation'].temperature)
tottal_times = []
temperature_list_full_simulation = []
for i,exp in enumerate(self.exp_dict_list_optimized):
single_exp_dict = []
temp_list_single_experiment = []
observable_counter=0
for j,observable in enumerate(exp['mole_fraction_observables'] + exp['concentration_observables']):
if observable == None:
continue
if observable in exp['mole_fraction_observables']:
single_exp_dict.append(exp['experimental_data'][observable_counter]['Time']*1e3)
interploated_temp = np.interp(exp['experimental_data'][observable_counter]['Time'],exp['simulation'].timeHistories[0]['time'],exp['simulation'].timeHistories[0]['temperature'])
temp_list_single_experiment.append(interploated_temp)
observable_counter+=1
if observable in exp['concentration_observables']:
single_exp_dict.append(exp['experimental_data'][observable_counter]['Time']*1e3)
interploated_temp = np.interp(exp['experimental_data'][observable_counter]['Time'],exp['simulation'].timeHistories[0]['time'],exp['simulation'].timeHistories[0]['temperature'])
temp_list_single_experiment.append(interploated_temp)
#print(interploated_temp.shape ,exp['experimental_data'][observable_counter]['Time'].shape )
observable_counter+=1
if 'perturbed_coef' in exp.keys():
wavelengths = self.parsed_yaml_list[i]['absorbanceCsvWavelengths']
for k,wl in enumerate(wavelengths):
single_exp_dict.append(exp['absorbance_experimental_data'][k]['time']*1e3)
interploated_temp = np.interp(exp['absorbance_experimental_data'][k]['time'],exp['simulation'].timeHistories[0]['time'],exp['simulation'].timeHistories[0]['temperature'])
temp_list_single_experiment.append(interploated_temp)
#print(interploated_temp.shape, exp['absorbance_experimental_data'][k]['time'].shape )
tottal_times.append(single_exp_dict)
temperature_list_full_simulation.append(temp_list_single_experiment)
if bool(experiments_want_to_plot_data_from)==False:
experiments_want_to_plot_data_from = np.arange(0,len(self.exp_dict_list_optimized))
else:
experiments_want_to_plot_data_from = experiments_want_to_plot_data_from
y_values = []
Y_values = []
temperature_values_list = []
time_values_list = []
full_temperature_range_list = []
start = 0
stop = 0
for x in range(len(self.simulation_lengths_of_experimental_data)):
single_experiment_Y =[]
single_experiment_y =[]
single_experiment_temperature_values_list=[]
single_experiment_time_values_list=[]
single_experiment_full_temp_range=[]
for y in range(len(self.simulation_lengths_of_experimental_data[x])):
stop = self.simulation_lengths_of_experimental_data[x][y] + start
if x in experiments_want_to_plot_data_from:
temp = self.Y_matrix[start:stop,:]
single_experiment_Y.append(temp)
temp2 = self.y_matrix[start:stop,:]
single_experiment_y.append(temp2)
intial_temp = np.array(([init_temperature_list[x]]*temp.shape[0]))
intial_temp = intial_temp.reshape((intial_temp.shape[0],1))
single_experiment_temperature_values_list.append(intial_temp)
time_values = tottal_times[x][y].values
time_values = time_values.reshape((time_values.shape[0],1))
single_experiment_time_values_list.append(time_values)
temperature_full = temperature_list_full_simulation[x][y]
temperature_full = temperature_full.reshape((temperature_full.shape[0],1))
single_experiment_full_temp_range.append(temperature_full)
start = start + self.simulation_lengths_of_experimental_data[x][y]
else:
start = start + self.simulation_lengths_of_experimental_data[x][y]
Y_values.append(single_experiment_Y)
y_values.append(single_experiment_y)
temperature_values_list.append(single_experiment_temperature_values_list)
time_values_list.append(single_experiment_time_values_list)
full_temperature_range_list.append(single_experiment_full_temp_range)
colors = cm.rainbow(np.linspace(0, 1, 30))  # color cycle used to distinguish experiments
plt.figure()
for x,simulation_list in enumerate(Y_values):
for y,lst in enumerate(Y_values[x]):
plt.subplot(2,1,1)
plt.xlabel('Y')
plt.ylabel('Time')
plt.scatter(Y_values[x][y],time_values_list[x][y],label='Experiment_'+str(x)+'_observable_'+str(y),color=colors[x])
plt.legend(ncol=2,bbox_to_anchor=(1, 0.5))
plt.subplot(2,1,2)
plt.scatter(y_values[x][y],time_values_list[x][y],color=colors[x])
plt.xlabel('y')
plt.ylabel('Time')
plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=.5, hspace=.5)
plt.savefig(directory_to_save_images+'/'+'Including Experiments_'+ str(experiments_want_to_plot_data_from)+'_Yy_vs_time.pdf',dpi=1000,bbox_inches='tight')
plt.figure()
for x,simulation_list in enumerate(Y_values):
for y,lst in enumerate(Y_values[x]):
plt.subplot(2,1,1)
plt.scatter(Y_values[x][y],temperature_values_list[x][y],label='Experiment_'+str(x)+'_observable_'+str(y),color=colors[x])
plt.legend(ncol=2,bbox_to_anchor=(1, 0.5))
plt.xlabel('Y')
plt.ylabel('Initial Simulation Temp')
plt.subplot(2,1,2)
plt.scatter(y_values[x][y],temperature_values_list[x][y],color=colors[x])
plt.xlabel('y')
plt.ylabel('Initial Simulation Temp')
plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=.5, hspace=.5)
plt.savefig(directory_to_save_images+'/'+'Including Experiments_'+ str(experiments_want_to_plot_data_from)+'_Yy_vs_init_temp.pdf',dpi=1000,bbox_inches='tight')
plt.figure()
for x,simulation_list in enumerate(Y_values):
for y,lst in enumerate(Y_values[x]):
plt.subplot(2,1,1)
plt.scatter(Y_values[x][y],full_temperature_range_list[x][y],label='Experiment_'+str(x)+'_observable_'+str(y),color=colors[x])
plt.legend(ncol=2,bbox_to_anchor=(1, 0.5))
plt.xlabel('Y')
plt.ylabel('Temperature')
plt.subplot(2,1,2)
plt.scatter(y_values[x][y],full_temperature_range_list[x][y],color=colors[x])
plt.xlabel('y')
plt.ylabel('Temperature')
plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=.5, hspace=.5)
plt.savefig(directory_to_save_images+'/'+'Including Experiments_'+ str(experiments_want_to_plot_data_from)+'_Yy_vs_temperature.pdf',dpi=1000,bbox_inches='tight')
return
#working here
def plotting_histograms_of_individual_observables(self,experiments_want_to_plot_data_from,bins='auto',directory_to_save_images='',csv=''):
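# Collects the observables (mole fraction, concentration, and absorbance wavelengths) of
# every optimized experiment, stacks the Y_matrix / y_matrix slices belonging to the
# requested experiments per unique observable, and plots 4-panel and 2-panel histograms
# (raw and density-normalized) for each observable.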
s_shape = self.S_matrix.shape[1]
if self.k_target_value_S_matrix.any():
target_values_for_s = self.k_target_value_S_matrix
s_shape = s_shape+target_values_for_s.shape[0]
y_shape = self.y_matrix.shape[0]
difference = y_shape-s_shape
y_values = self.y_matrix[0:difference,0]
Y_values = self.Y_matrix[0:difference,0]
self.lengths_of_experimental_data()
# plotting Y histograms
# observable_list = []
observables_tottal = []
for i,exp in enumerate(self.exp_dict_list_optimized):
observable_counter=0
single_experiment = []
for j,observable in enumerate(exp['mole_fraction_observables'] + exp['concentration_observables']):
if observable == None:
continue
if observable in exp['mole_fraction_observables']:
single_experiment.append(observable)
observable_counter+=1
if observable in exp['concentration_observables']:
single_experiment.append(observable)
observable_counter+=1
if 'perturbed_coef' in exp.keys():
wavelengths = self.parsed_yaml_list[i]['absorbanceCsvWavelengths']
for k,wl in enumerate(wavelengths):
single_experiment.append(wl)
observables_tottal.append(single_experiment)
observables_flatten = [item for sublist in observables_tottal for item in sublist]
from collections import OrderedDict
observables_unique = list(OrderedDict.fromkeys(observables_flatten))
empty_nested_observable_list_Y = [[] for x in range(len(observables_unique))]
empty_nested_observable_list_y = [[] for x in range(len(observables_unique))]
if bool(experiments_want_to_plot_data_from):
y_values = []
Y_values = []
start = 0
stop = 0
for x in range(len(self.simulation_lengths_of_experimental_data)):
for y in range(len(self.simulation_lengths_of_experimental_data[x])):
current_observable = observables_tottal[x][y]
stop = self.simulation_lengths_of_experimental_data[x][y] + start
if x in experiments_want_to_plot_data_from:
temp = self.Y_matrix[start:stop,:]
empty_nested_observable_list_Y[observables_unique.index(current_observable)].append(temp)
temp2 = self.y_matrix[start:stop,:]
empty_nested_observable_list_y[observables_unique.index(current_observable)].append(temp2)
start = start + self.simulation_lengths_of_experimental_data[x][y]
else:
start = start + self.simulation_lengths_of_experimental_data[x][y]
for i,observable in enumerate(empty_nested_observable_list_Y):
if bool(observable):
Y_values = np.vstack((observable))
y_values = np.vstack((empty_nested_observable_list_y[i]))
plt.figure()
plt.subplot(2,2,1)
n, bins2, patches = plt.hist(Y_values,bins=bins ,align='mid')
min_value = np.min(Y_values)
max_value = np.max(Y_values)
plt.xlim([min_value,max_value])
plt.xlabel('Y')
plt.suptitle(str(observables_unique[i])+'_Including Experiments_'+ str(experiments_want_to_plot_data_from), fontsize=10)
plt.subplot(2,2,2)
plt.hist(y_values,bins=bins,align='mid')
plt.xlabel('y')
plt.subplot(2,2,3)
plt.hist(Y_values,bins=bins,density=True,align='mid')
plt.xlabel('Y')
plt.ylabel('normalized')
plt.subplot(2,2,4)
plt.hist(y_values,bins=bins,density=True,align='mid')
plt.xlabel('y')
plt.ylabel('normalized')
plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=.5, hspace=.5)
#plt.savefig(directory_to_save_images+'/'+str(observables_unique[i])+'_Including Experiments_'+ str(experiments_want_to_plot_data_from)+'_Yy_hist_4.pdf',dpi=1000,bbox_inches='tight')
#plotting two fold plots
plt.figure()
plt.subplot(2,1,1)
plt.title(str(observables_unique[i])+'_Including Experiments_'+ str(experiments_want_to_plot_data_from))
n, bins2, patches = plt.hist(Y_values,bins=bins ,align='mid')
plt.xlabel('Y')
#plt.xlim([-1,1])
plt.subplot(2,1,2)
plt.hist(y_values,bins=bins,align='mid')
plt.xlabel('y')
plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=.5, hspace=.5)
#plt.savefig(directory_to_save_images+'/'+str(observables_unique[i])+'_Including Experiments_'+ str(experiments_want_to_plot_data_from)+'_Yy_hist_2.pdf',dpi=1000,bbox_inches='tight')
#plotting normalized values
plt.figure()
plt.subplot(2,1,1)
n, bins2, patches = plt.hist(Y_values,bins=bins ,align='mid',density=True)
plt.xlabel('Y')
plt.title(str(observables_unique[i])+'_Including Experiments_'+ str(experiments_want_to_plot_data_from))
plt.ylabel('normalized')
plt.subplot(2,1,2)
plt.hist(y_values,bins=bins,align='mid',density=True)
plt.xlabel('y')
plt.ylabel('normalized')
plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=.5, hspace=.5)
# plt.savefig(directory_to_save_images+'/'+str(observables_unique[i])+'_Including Experiments_'+ str(experiments_want_to_plot_data_from)+'_Yy_hist_2_normalized.pdf',dpi=1000,bbox_inches='tight')
def plotting_histograms_of_individual_observables_for_paper_2(self,experiments_want_to_plot_data_from,experiments_want_to_plot_data_from_2=[],bins='auto',directory_to_save_images='',csv=''):
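# Paper-formatted variant of the per-observable histograms: stacks Y, y, and z slices for
# two experiment groups, discards points whose z value exceeds 100, and draws density
# histograms for observables 0, 1, and 3 on a shared 3-row GridSpec figure; the optional
# csv overlay is currently commented out.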
s_shape = self.S_matrix.shape[1]
if self.k_target_value_S_matrix.any():
target_values_for_s = self.k_target_value_S_matrix
s_shape = s_shape+target_values_for_s.shape[0]
y_shape = self.y_matrix.shape[0]
difference = y_shape-s_shape
y_values = self.y_matrix[0:difference,0]
Y_values = self.Y_matrix[0:difference,0]
self.lengths_of_experimental_data()
# plotting Y histograms
# TODO: edit this part
# observable_list = []
observables_tottal = []
for i,exp in enumerate(self.exp_dict_list_optimized):
observable_counter=0
single_experiment = []
for j,observable in enumerate(exp['mole_fraction_observables'] + exp['concentration_observables']):
if observable == None:
continue
if observable in exp['mole_fraction_observables']:
single_experiment.append(observable)
observable_counter+=1
if observable in exp['concentration_observables']:
single_experiment.append(observable)
observable_counter+=1
if 'perturbed_coef' in exp.keys():
wavelengths = self.parsed_yaml_list[i]['absorbanceCsvWavelengths']
for k,wl in enumerate(wavelengths):
single_experiment.append(wl)
observables_tottal.append(single_experiment)
observables_flatten = [item for sublist in observables_tottal for item in sublist]
from collections import OrderedDict
observables_unique = list(OrderedDict.fromkeys(observables_flatten))
empty_nested_observable_list_Y = [[] for x in range(len(observables_unique))]
empty_nested_observable_list_y = [[] for x in range(len(observables_unique))]
empty_nested_observable_list_Z = [[] for x in range(len(observables_unique))]
empty_nested_observable_list_Y_2 = [[] for x in range(len(observables_unique))]
empty_nested_observable_list_y_2 = [[] for x in range(len(observables_unique))]
empty_nested_observable_list_Z_2 = [[] for x in range(len(observables_unique))]
if bool(experiments_want_to_plot_data_from):
print('inside here')
y_values = []
Y_values = []
start = 0
stop = 0
for x in range(len(self.simulation_lengths_of_experimental_data)):
for y in range(len(self.simulation_lengths_of_experimental_data[x])):
current_observable = observables_tottal[x][y]
stop = self.simulation_lengths_of_experimental_data[x][y] + start
if x in experiments_want_to_plot_data_from:
temp = self.Y_matrix[start:stop,:]
empty_nested_observable_list_Y[observables_unique.index(current_observable)].append(temp)
temp2 = self.y_matrix[start:stop,:]
empty_nested_observable_list_y[observables_unique.index(current_observable)].append(temp2)
temp3 = self.z_matrix[start:stop,:]
empty_nested_observable_list_Z[observables_unique.index(current_observable)].append(temp3)
start = start + self.simulation_lengths_of_experimental_data[x][y]
else:
start = start + self.simulation_lengths_of_experimental_data[x][y]
if bool(experiments_want_to_plot_data_from_2):
start = 0
stop = 0
for x in range(len(self.simulation_lengths_of_experimental_data)):
for y in range(len(self.simulation_lengths_of_experimental_data[x])):
current_observable = observables_tottal[x][y]
stop = self.simulation_lengths_of_experimental_data[x][y] + start
if x in experiments_want_to_plot_data_from_2:
temp = self.Y_matrix[start:stop,:]
empty_nested_observable_list_Y_2[observables_unique.index(current_observable)].append(temp)
temp2 = self.y_matrix[start:stop,:]
empty_nested_observable_list_y_2[observables_unique.index(current_observable)].append(temp2)
temp3 = self.z_matrix[start:stop,:]
empty_nested_observable_list_Z_2[observables_unique.index(current_observable)].append(temp3)
start = start + self.simulation_lengths_of_experimental_data[x][y]
else:
start = start + self.simulation_lengths_of_experimental_data[x][y]
import matplotlib.gridspec as gridspec
fig = plt.figure(figsize=(6,7))
gs = gridspec.GridSpec(3, 1,height_ratios=[3,3,3],wspace=0.1,hspace=0.1)
gs.update(wspace=0, hspace=0.7)
ax1=plt.subplot(gs[0])
ax2=plt.subplot(gs[1])
ax3=plt.subplot(gs[2])
for i,observable in enumerate(empty_nested_observable_list_Y):
new_Y_test_2 =[]
if bool(observable):
Y_values = np.vstack((observable))
y_values = np.vstack((empty_nested_observable_list_y[i]))
z_values = np.vstack((empty_nested_observable_list_Z[i]))
indecies = np.argwhere(z_values > 100)
new_y_test = copy.deepcopy(Y_values)
new_y_test = np.delete(new_y_test,indecies)
# print(indecies.shape)
# print(indecies)
# print(i)
if bool(experiments_want_to_plot_data_from_2) and bool(empty_nested_observable_list_y_2[i]):
Y_values_2 = np.vstack((empty_nested_observable_list_Y_2[i]))
y_values_2 = np.vstack((empty_nested_observable_list_y_2[i]))
z_values_2 = np.vstack((empty_nested_observable_list_Z_2[i]))
indecies_2 = np.argwhere(z_values_2 > 100)
new_Y_test_2 = copy.deepcopy(Y_values_2)
new_Y_test_2 = np.delete(new_Y_test_2,indecies_2)
#plt.figure()
#plt.subplot(1,1,1)
#plt.subplots(3,1,1)
#n, bins2, patches = plt.hist(Y_values,bins=bins ,align='mid',density=True,label='Hong Experiments')
test = [-0.06402874, -0.05325865, -0.04248857, -0.03171848, -0.02094839, -0.0101783,
0.00059179, 0.01136188, 0.02213197, 0.03290205, 0.04367214, 0.05444223,
0.06521232, 0.07598241, 0.0867525, 0.09752259, 0.10829268]
if i ==0:
#n, bins2, patches = plt.hist(new_y_test,bins=bins ,align='mid',density=True,label='Hong Experiments')
#ax1.hist(new_y_test,bins=bins ,align='mid',density=True,label='Hong Experiments')
n,bins_test_1,patches = ax1.hist(new_y_test,bins=bins ,align='mid',density=True,label='#1')
ax1.set_xlim(left=-.3, right=.3, emit=True, auto=False)
ax1.set_ylim(top=15,bottom=0)
ax1.set_xlabel('Y')
ax1.set_xlabel('Relative Difference')
#plt.title(str(observables_unique[i])+'_Including Experiments_'+ str(experiments_want_to_plot_data_from))
ax1.set_title(str(observables_unique[i]))
ax1.set_ylabel('pdf')
#plt.ylabel('normalized')
if bool(experiments_want_to_plot_data_from_2):
# plt.hist(Y_values_2,bins=bins ,align='mid',density=True,alpha=0.5,label='Extra Experiments')
#ax1.hist(new_Y_test_2,bins=bins ,align='mid',density=True,alpha=0.5,label='Extra Experiments')
ax1.hist(new_Y_test_2,bins=bins ,align='mid',density=True,alpha=0.5,label='#2')
if bool(csv):
df = pd.read_csv(csv)
#ax1.hist(df[str(observables_unique[i])+'_Y'].dropna()*-1,bins=bins ,align='mid',density=True,alpha=0.5,label='Hong vs. Hong')
#ax1.hist(df[str(observables_unique[i])+'_Y'].dropna()*-1,bins=bins ,align='mid',density=True,alpha=0.5,label='#3')
ax1.legend()
if i ==1:
#n, bins2, patches = plt.hist(new_y_test,bins=bins ,align='mid',density=True,label='Hong Experiments')
n,bins_test_2,patches = ax2.hist(new_y_test,bins=bins ,align='mid',density=True,label='Hong Experiments')
ax2.set_xlim(left=-.08, right=.08, emit=True, auto=False)
ax2.set_ylim(top=28,bottom=0)
ax2.set_xlabel('Y')
ax2.set_xlabel('Relative Difference')
#plt.title(str(observables_unique[i])+'_Including Experiments_'+ str(experiments_want_to_plot_data_from))
#ax2.set_title(str(observables_unique[i]))
ax2.set_title(r'H$_2$O')
ax2.set_ylabel('pdf')
#plt.ylabel('normalized')
if bool(experiments_want_to_plot_data_from_2):
# plt.hist(Y_values_2,bins=bins ,align='mid',density=True,alpha=0.5,label='Extra Experiments')
ax2.hist(new_Y_test_2,bins=bins ,align='mid',density=True,alpha=0.5,label='Extra Experiments')
if bool(csv):
df = pd.read_csv(csv)
#ax2.hist(df[str(observables_unique[i])+'_Y'].dropna()*-1,bins=bins ,align='mid',density=True,alpha=0.5,label='Hong vs. Hong')
if i ==3:
#n, bins2, patches = plt.hist(new_y_test,bins=bins ,align='mid',density=True,label='Hong Experiments')
n,bins_test_3,patches = ax3.hist(new_y_test,bins=bins ,align='mid',density=True,label='Hong Experiments')
ax3.set_xlim(left=-.15, right=.15, emit=True, auto=False)
ax3.set_ylim(top=12,bottom=0)
ax3.set_xlabel('Y')
ax3.set_xlabel('Relative Difference')
ax3.set_ylabel('pdf')
#plt.title(str(observables_unique[i])+'_Including Experiments_'+ str(experiments_want_to_plot_data_from))
ax3.set_title(str(observables_unique[i]))
ax3.set_title('Absorbance '+ str(observables_unique[i])+ ' nm')
#plt.ylabel('normalized')
if bool(experiments_want_to_plot_data_from_2):
print('inside here')
print(experiments_want_to_plot_data_from_2)
# plt.hist(Y_values_2,bins=bins ,align='mid',density=True,alpha=0.5,label='Extra Experiments')
ax3.hist(new_Y_test_2,bins=bins ,align='mid',density=True,alpha=0.5,label='Extra Experiments')
if bool(csv):
df = pd.read_csv(csv)
#ax3.hist(df[str(observables_unique[i])+'_Y'].dropna()*-1,bins=bins ,align='mid',density=True,alpha=0.5,label='Hong vs. Hong')
plt.savefig(directory_to_save_images+'/'+str(observables_unique[i])+'_Including Experiments_'+ str(experiments_want_to_plot_data_from)+'_Yy_hist_2_normalized.pdf',dpi=1000,bbox_inches='tight')
def plotting_histograms_of_individual_observables_for_paper(self,experiments_want_to_plot_data_from,experiments_want_to_plot_data_from_2=[],bins='auto',directory_to_save_images='',csv=''):
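# Single-panel variant used for the paper: for each observable it stacks Y, y, and z slices
# for one or two experiment groups, removes points with z > 100, and plots overlaid density
# histograms, optionally adding a histogram of the '<observable>_Y' column from an external
# csv for comparison.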
s_shape = self.S_matrix.shape[1]
if self.k_target_value_S_matrix.any():
target_values_for_s = self.k_target_value_S_matrix
s_shape = s_shape+target_values_for_s.shape[0]
y_shape = self.y_matrix.shape[0]
difference = y_shape-s_shape
y_values = self.y_matrix[0:difference,0]
Y_values = self.Y_matrix[0:difference,0]
self.lengths_of_experimental_data()
# plotting Y histograms
# observable_list = []
observables_tottal = []
for i,exp in enumerate(self.exp_dict_list_optimized):
observable_counter=0
single_experiment = []
for j,observable in enumerate(exp['mole_fraction_observables'] + exp['concentration_observables']):
if observable == None:
continue
if observable in exp['mole_fraction_observables']:
single_experiment.append(observable)
observable_counter+=1
if observable in exp['concentration_observables']:
single_experiment.append(observable)
observable_counter+=1
if 'perturbed_coef' in exp.keys():
wavelengths = self.parsed_yaml_list[i]['absorbanceCsvWavelengths']
for k,wl in enumerate(wavelengths):
single_experiment.append(wl)
observables_tottal.append(single_experiment)
observables_flatten = [item for sublist in observables_tottal for item in sublist]
from collections import OrderedDict
observables_unique = list(OrderedDict.fromkeys(observables_flatten))
empty_nested_observable_list_Y = [[] for x in range(len(observables_unique))]
empty_nested_observable_list_y = [[] for x in range(len(observables_unique))]
empty_nested_observable_list_Z = [[] for x in range(len(observables_unique))]
empty_nested_observable_list_Y_2 = [[] for x in range(len(observables_unique))]
empty_nested_observable_list_y_2 = [[] for x in range(len(observables_unique))]
empty_nested_observable_list_Z_2 = [[] for x in range(len(observables_unique))]
if bool(experiments_want_to_plot_data_from):
print('inside here')
y_values = []
Y_values = []
start = 0
stop = 0
for x in range(len(self.simulation_lengths_of_experimental_data)):
for y in range(len(self.simulation_lengths_of_experimental_data[x])):
current_observable = observables_tottal[x][y]
stop = self.simulation_lengths_of_experimental_data[x][y] + start
if x in experiments_want_to_plot_data_from:
temp = self.Y_matrix[start:stop,:]
empty_nested_observable_list_Y[observables_unique.index(current_observable)].append(temp)
temp2 = self.y_matrix[start:stop,:]
empty_nested_observable_list_y[observables_unique.index(current_observable)].append(temp2)
temp3 = self.z_matrix[start:stop,:]
empty_nested_observable_list_Z[observables_unique.index(current_observable)].append(temp3)
start = start + self.simulation_lengths_of_experimental_data[x][y]
else:
start = start + self.simulation_lengths_of_experimental_data[x][y]
if bool(experiments_want_to_plot_data_from_2):
start = 0
stop = 0
for x in range(len(self.simulation_lengths_of_experimental_data)):
for y in range(len(self.simulation_lengths_of_experimental_data[x])):
current_observable = observables_tottal[x][y]
stop = self.simulation_lengths_of_experimental_data[x][y] + start
if x in experiments_want_to_plot_data_from_2:
temp = self.Y_matrix[start:stop,:]
empty_nested_observable_list_Y_2[observables_unique.index(current_observable)].append(temp)
temp2 = self.y_matrix[start:stop,:]
empty_nested_observable_list_y_2[observables_unique.index(current_observable)].append(temp2)
temp3 = self.z_matrix[start:stop,:]
empty_nested_observable_list_Z_2[observables_unique.index(current_observable)].append(temp3)
start = start + self.simulation_lengths_of_experimental_data[x][y]
else:
start = start + self.simulation_lengths_of_experimental_data[x][y]
import matplotlib.gridspec as gridspec
for i,observable in enumerate(empty_nested_observable_list_Y):
if bool(observable):
Y_values = np.vstack((observable))
y_values = np.vstack((empty_nested_observable_list_y[i]))
z_values = np.vstack((empty_nested_observable_list_Z[i]))
indecies = np.argwhere(z_values > 100)
new_y_test = copy.deepcopy(Y_values)
new_y_test = np.delete(new_y_test,indecies)
if bool(experiments_want_to_plot_data_from_2) and bool(empty_nested_observable_list_y_2[i]):
Y_values_2 = np.vstack((empty_nested_observable_list_Y_2[i]))
y_values_2 = np.vstack((empty_nested_observable_list_y_2[i]))
z_values_2 = np.vstack((empty_nested_observable_list_Z_2[i]))
indecies_2 = np.argwhere(z_values_2 > 100)
new_Y_test_2 = copy.deepcopy(Y_values_2)
new_Y_test_2 = np.delete(new_Y_test_2,indecies_2)
plt.figure()
plt.subplot(1,1,1)
#plt.subplots(3,1,1)
#n, bins2, patches = plt.hist(Y_values,bins=bins ,align='mid',density=True,label='Hong Experiments')
n, bins2, patches = plt.hist(new_y_test,bins=bins ,align='mid',density=True,label='Hong Experiments')
plt.xlabel('Y')
#plt.title(str(observables_unique[i])+'_Including Experiments_'+ str(experiments_want_to_plot_data_from))
plt.title(str(observables_unique[i]))
#plt.ylabel('normalized')
if bool(experiments_want_to_plot_data_from_2):
# plt.hist(Y_values_2,bins=bins ,align='mid',density=True,alpha=0.5,label='Extra Experiments')
plt.hist(new_Y_test_2,bins=bins ,align='mid',density=True,alpha=0.5,label='Extra Experiments')
if bool(csv):
df = pd.read_csv(csv)
plt.hist(df[str(observables_unique[i])+'_Y'].dropna()*-1,bins=bins ,align='mid',density=True,alpha=0.5,label='Hong vs. Hong')
plt.legend()
return
def plotting_T_and_time_full_simulation_individual_observables(self,experiments_want_to_plot_data_from,bins='auto',directory_to_save_images=''):
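# Groups the Y / y slices, experimental times, interpolated simulation temperatures, and
# initial temperatures by unique observable for the selected experiments, then saves
# scatter plots of Y and y against time, temperature, and initial temperature (one PDF per
# observable and comparison).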
#working_here
observables_tottal = []
for i,exp in enumerate(self.exp_dict_list_optimized):
observable_counter=0
single_experiment = []
for j,observable in enumerate(exp['mole_fraction_observables'] + exp['concentration_observables']):
if observable == None:
continue
if observable in exp['mole_fraction_observables']:
single_experiment.append(observable)
observable_counter+=1
if observable in exp['concentration_observables']:
single_experiment.append(observable)
observable_counter+=1
if 'perturbed_coef' in exp.keys():
wavelengths = self.parsed_yaml_list[i]['absorbanceCsvWavelengths']
for k,wl in enumerate(wavelengths):
single_experiment.append(wl)
observables_tottal.append(single_experiment)
observables_flatten = [item for sublist in observables_tottal for item in sublist]
from collections import OrderedDict
observables_unique = list(OrderedDict.fromkeys(observables_flatten))
empty_nested_observable_list_Y = [[] for x in range(len(observables_unique))]
empty_nested_observable_list_y = [[] for x in range(len(observables_unique))]
empty_nested_observable_list_time = [[] for x in range(len(observables_unique))]
empty_nested_observable_list_temperature = [[] for x in range(len(observables_unique))]
empty_nested_observable_list_initial_temperature = [[] for x in range(len(observables_unique))]
if bool(experiments_want_to_plot_data_from):
start = 0
stop = 0
for x in range(len(self.simulation_lengths_of_experimental_data)):
for y in range(len(self.simulation_lengths_of_experimental_data[x])):
current_observable = observables_tottal[x][y]
stop = self.simulation_lengths_of_experimental_data[x][y] + start
if x in experiments_want_to_plot_data_from:
temp = self.Y_matrix[start:stop,:]
empty_nested_observable_list_Y[observables_unique.index(current_observable)].append(temp)
temp2 = self.y_matrix[start:stop,:]
empty_nested_observable_list_y[observables_unique.index(current_observable)].append(temp2)
start = start + self.simulation_lengths_of_experimental_data[x][y]
else:
start = start + self.simulation_lengths_of_experimental_data[x][y]
for i,exp in enumerate(self.exp_dict_list_optimized):
observable_counter=0
for j,observable in enumerate(exp['mole_fraction_observables'] + exp['concentration_observables']):
if observable == None:
continue
if i in experiments_want_to_plot_data_from:
if observable in exp['mole_fraction_observables']:
empty_nested_observable_list_time[observables_unique.index(observable)].append(exp['experimental_data'][observable_counter]['Time']*1e3)
interploated_temp = np.interp(exp['experimental_data'][observable_counter]['Time'],exp['simulation'].timeHistories[0]['time'],exp['simulation'].timeHistories[0]['temperature'])
empty_nested_observable_list_temperature[observables_unique.index(observable)].append(interploated_temp)
empty_nested_observable_list_initial_temperature[observables_unique.index(observable)].append([self.exp_dict_list_original[i]['simulation'].temperature]*np.shape(interploated_temp)[0])
observable_counter+=1
if observable in exp['concentration_observables']:
empty_nested_observable_list_time[observables_unique.index(observable)].append(exp['experimental_data'][observable_counter]['Time']*1e3)
interploated_temp = np.interp(exp['experimental_data'][observable_counter]['Time'],exp['simulation'].timeHistories[0]['time'],exp['simulation'].timeHistories[0]['temperature'])
empty_nested_observable_list_temperature[observables_unique.index(observable)].append(interploated_temp)
empty_nested_observable_list_initial_temperature[observables_unique.index(observable)].append([self.exp_dict_list_original[i]['simulation'].temperature]*np.shape(interploated_temp)[0])
observable_counter+=1
if i in experiments_want_to_plot_data_from:
if 'perturbed_coef' in exp.keys():
wavelengths = self.parsed_yaml_list[i]['absorbanceCsvWavelengths']
for k,wl in enumerate(wavelengths):
empty_nested_observable_list_time[observables_unique.index(wl)].append(exp['absorbance_experimental_data'][k]['time']*1e3)
interploated_temp = np.interp(exp['absorbance_experimental_data'][k]['time'],exp['simulation'].timeHistories[0]['time'],exp['simulation'].timeHistories[0]['temperature'])
empty_nested_observable_list_temperature[observables_unique.index(wl)].append(interploated_temp)
empty_nested_observable_list_initial_temperature[observables_unique.index(wl)].append([self.exp_dict_list_original[i]['simulation'].temperature]*np.shape(interploated_temp)[0])
#print(interploated_temp.shape, exp['absorbance_experimental_data'][k]['time'].shape )
colors = cm.rainbow(np.linspace(0, 1, 30))  # color cycle used to distinguish experiments
for x,observable in enumerate(empty_nested_observable_list_Y):
if bool(observable):
plt.figure()
for y,array in enumerate(empty_nested_observable_list_Y[x]):
plt.subplot(2,1,1)
plt.xlabel('Y')
plt.ylabel('Time')
plt.scatter(empty_nested_observable_list_Y[x][y],empty_nested_observable_list_time[x][y],label='Experiment_'+str(x)+'_observable_'+str(y),color=colors[x])
#plt.legend(ncol=2,bbox_to_anchor=(1, 0.5))
plt.title(observables_unique[x])
plt.subplot(2,1,2)
plt.scatter(empty_nested_observable_list_y[x][y],empty_nested_observable_list_time[x][y],color=colors[x])
plt.xlabel('y')
plt.ylabel('Time')
plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=.5, hspace=.5)
plt.savefig(directory_to_save_images+'/'+str(observables_unique[x])+'_Including Experiments_'+str(experiments_want_to_plot_data_from)+'_Yy_vs_time.pdf',dpi=1000,bbox_inches='tight')
for x,observable in enumerate(empty_nested_observable_list_Y):
if bool(observable):
plt.figure()
for y,array in enumerate(empty_nested_observable_list_Y[x]):
plt.subplot(2,1,1)
plt.scatter(empty_nested_observable_list_Y[x][y],empty_nested_observable_list_temperature[x][y],label='Experiment_'+str(x)+'_observable_'+str(y),color=colors[x])
#plt.legend(ncol=2,bbox_to_anchor=(1, 0.5))
plt.xlabel('Y')
plt.ylabel('Temperature')
plt.title(observables_unique[x])
plt.subplot(2,1,2)
plt.scatter(empty_nested_observable_list_y[x][y],empty_nested_observable_list_temperature[x][y],color=colors[x])
plt.xlabel('y')
plt.ylabel('Temperature')
plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=.5, hspace=.5)
plt.savefig(directory_to_save_images+'/'+str(observables_unique[x])+'_Including Experiments_'+str(experiments_want_to_plot_data_from)+'_Yy_vs_temperature.pdf',dpi=1000,bbox_inches='tight')
for x,observable in enumerate(empty_nested_observable_list_Y):
if bool(observable):
plt.figure()
for y,array in enumerate(empty_nested_observable_list_Y[x]):
plt.subplot(2,1,1)
plt.scatter(empty_nested_observable_list_Y[x][y],empty_nested_observable_list_initial_temperature[x][y],label='Experiment_'+str(x)+'_observable_'+str(y),color=colors[x])
#plt.legend(ncol=2,bbox_to_anchor=(1, 0.5))
plt.xlabel('Y')
plt.ylabel('Initial Temperature')
plt.title(observables_unique[x])
plt.subplot(2,1,2)
plt.scatter(empty_nested_observable_list_y[x][y],empty_nested_observable_list_initial_temperature[x][y],color=colors[x])
plt.xlabel('y')
plt.ylabel('Initial Temperature')
plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=.5, hspace=.5)
plt.savefig(directory_to_save_images+'/'+str(observables_unique[x])+'_Including Experiments_'+str(experiments_want_to_plot_data_from)+'_Yy_vs_initial_temperature.pdf',dpi=1000,bbox_inches='tight')
def plotting_T_and_time_full_simulation_individual_observables_for_paper(self,experiments_want_to_plot_data_from,
bins='auto',
directory_to_save_images='',csv='',experiments_want_to_plot_data_from_2=[]):
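# Paper variant of the per-observable scatter plots: collects the same per-observable
# Y / y, time, and temperature groupings for two experiment groups and plots them on single
# panels (group 1 in blue, group 2 in red), optionally overlaying reference points from an
# external csv; the savefig calls are currently commented out.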
#working_here
observables_tottal = []
for i,exp in enumerate(self.exp_dict_list_optimized):
observable_counter=0
single_experiment = []
for j,observable in enumerate(exp['mole_fraction_observables'] + exp['concentration_observables']):
if observable == None:
continue
if observable in exp['mole_fraction_observables']:
single_experiment.append(observable)
observable_counter+=1
if observable in exp['concentration_observables']:
single_experiment.append(observable)
observable_counter+=1
if 'perturbed_coef' in exp.keys():
wavelengths = self.parsed_yaml_list[i]['absorbanceCsvWavelengths']
for k,wl in enumerate(wavelengths):
single_experiment.append(wl)
observables_tottal.append(single_experiment)
observables_flatten = [item for sublist in observables_tottal for item in sublist]
from collections import OrderedDict
observables_unique = list(OrderedDict.fromkeys(observables_flatten))
empty_nested_observable_list_Y = [[] for x in range(len(observables_unique))]
empty_nested_observable_list_y = [[] for x in range(len(observables_unique))]
empty_nested_observable_list_time = [[] for x in range(len(observables_unique))]
empty_nested_observable_list_temperature = [[] for x in range(len(observables_unique))]
empty_nested_observable_list_initial_temperature = [[] for x in range(len(observables_unique))]
if bool(experiments_want_to_plot_data_from):
start = 0
stop = 0
for x in range(len(self.simulation_lengths_of_experimental_data)):
for y in range(len(self.simulation_lengths_of_experimental_data[x])):
current_observable = observables_tottal[x][y]
stop = self.simulation_lengths_of_experimental_data[x][y] + start
if x in experiments_want_to_plot_data_from:
temp = self.Y_matrix[start:stop,:]
empty_nested_observable_list_Y[observables_unique.index(current_observable)].append(temp)
temp2 = self.y_matrix[start:stop,:]
empty_nested_observable_list_y[observables_unique.index(current_observable)].append(temp2)
start = start + self.simulation_lengths_of_experimental_data[x][y]
else:
start = start + self.simulation_lengths_of_experimental_data[x][y]
for i,exp in enumerate(self.exp_dict_list_optimized):
observable_counter=0
for j,observable in enumerate(exp['mole_fraction_observables'] + exp['concentration_observables']):
if observable == None:
continue
if i in experiments_want_to_plot_data_from:
if observable in exp['mole_fraction_observables']:
empty_nested_observable_list_time[observables_unique.index(observable)].append(exp['experimental_data'][observable_counter]['Time']*1e3)
interploated_temp = np.interp(exp['experimental_data'][observable_counter]['Time'],exp['simulation'].timeHistories[0]['time'],exp['simulation'].timeHistories[0]['temperature'])
empty_nested_observable_list_temperature[observables_unique.index(observable)].append(interploated_temp)
empty_nested_observable_list_initial_temperature[observables_unique.index(observable)].append([self.exp_dict_list_original[i]['simulation'].temperature]*np.shape(interploated_temp)[0])
observable_counter+=1
if observable in exp['concentration_observables']:
empty_nested_observable_list_time[observables_unique.index(observable)].append(exp['experimental_data'][observable_counter]['Time']*1e3)
interploated_temp = np.interp(exp['experimental_data'][observable_counter]['Time'],exp['simulation'].timeHistories[0]['time'],exp['simulation'].timeHistories[0]['temperature'])
empty_nested_observable_list_temperature[observables_unique.index(observable)].append(interploated_temp)
empty_nested_observable_list_initial_temperature[observables_unique.index(observable)].append([self.exp_dict_list_original[i]['simulation'].temperature]*np.shape(interploated_temp)[0])
observable_counter+=1
if i in experiments_want_to_plot_data_from:
if 'perturbed_coef' in exp.keys():
wavelengths = self.parsed_yaml_list[i]['absorbanceCsvWavelengths']
for k,wl in enumerate(wavelengths):
empty_nested_observable_list_time[observables_unique.index(wl)].append(exp['absorbance_experimental_data'][k]['time']*1e3)
interploated_temp = np.interp(exp['absorbance_experimental_data'][k]['time'],exp['simulation'].timeHistories[0]['time'],exp['simulation'].timeHistories[0]['temperature'])
empty_nested_observable_list_temperature[observables_unique.index(wl)].append(interploated_temp)
empty_nested_observable_list_initial_temperature[observables_unique.index(wl)].append([self.exp_dict_list_original[i]['simulation'].temperature]*np.shape(interploated_temp)[0])
####################################################################################################################################################################################################################
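# Repeat the per-observable grouping (Y, y, time, temperature, initial temperature) for the
# second set of experiments; results are stored in the *_2 lists.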
empty_nested_observable_list_Y_2 = [[] for x in range(len(observables_unique))]
empty_nested_observable_list_y_2 = [[] for x in range(len(observables_unique))]
empty_nested_observable_list_time_2 = [[] for x in range(len(observables_unique))]
empty_nested_observable_list_temperature_2 = [[] for x in range(len(observables_unique))]
empty_nested_observable_list_initial_temperature_2 = [[] for x in range(len(observables_unique))]
if bool(experiments_want_to_plot_data_from_2):
start = 0
stop = 0
for x in range(len(self.simulation_lengths_of_experimental_data)):
for y in range(len(self.simulation_lengths_of_experimental_data[x])):
current_observable = observables_tottal[x][y]
stop = self.simulation_lengths_of_experimental_data[x][y] + start
if x in experiments_want_to_plot_data_from_2:
temp = self.Y_matrix[start:stop,:]
empty_nested_observable_list_Y_2[observables_unique.index(current_observable)].append(temp)
temp2 = self.y_matrix[start:stop,:]
empty_nested_observable_list_y_2[observables_unique.index(current_observable)].append(temp2)
start = start + self.simulation_lengths_of_experimental_data[x][y]
else:
start = start + self.simulation_lengths_of_experimental_data[x][y]
for i,exp in enumerate(self.exp_dict_list_optimized):
observable_counter=0
for j,observable in enumerate(exp['mole_fraction_observables'] + exp['concentration_observables']):
if observable == None:
continue
if i in experiments_want_to_plot_data_from_2:
if observable in exp['mole_fraction_observables']:
empty_nested_observable_list_time_2[observables_unique.index(observable)].append(exp['experimental_data'][observable_counter]['Time']*1e3)
interploated_temp = np.interp(exp['experimental_data'][observable_counter]['Time'],exp['simulation'].timeHistories[0]['time'],exp['simulation'].timeHistories[0]['temperature'])
empty_nested_observable_list_temperature_2[observables_unique.index(observable)].append(interploated_temp)
empty_nested_observable_list_initial_temperature_2[observables_unique.index(observable)].append([self.exp_dict_list_original[i]['simulation'].temperature]*np.shape(interploated_temp)[0])
observable_counter+=1
if observable in exp['concentration_observables']:
empty_nested_observable_list_time_2[observables_unique.index(observable)].append(exp['experimental_data'][observable_counter]['Time']*1e3)
interploated_temp = np.interp(exp['experimental_data'][observable_counter]['Time'],exp['simulation'].timeHistories[0]['time'],exp['simulation'].timeHistories[0]['temperature'])
empty_nested_observable_list_temperature_2[observables_unique.index(observable)].append(interploated_temp)
empty_nested_observable_list_initial_temperature_2[observables_unique.index(observable)].append([self.exp_dict_list_original[i]['simulation'].temperature]*np.shape(interploated_temp)[0])
observable_counter+=1
if i in experiments_want_to_plot_data_from_2:
if 'perturbed_coef' in exp.keys():
wavelengths = self.parsed_yaml_list[i]['absorbanceCsvWavelengths']
for k,wl in enumerate(wavelengths):
empty_nested_observable_list_time_2[observables_unique.index(wl)].append(exp['absorbance_experimental_data'][k]['time']*1e3)
interploated_temp = np.interp(exp['absorbance_experimental_data'][k]['time'],exp['simulation'].timeHistories[0]['time'],exp['simulation'].timeHistories[0]['temperature'])
empty_nested_observable_list_temperature_2[observables_unique.index(wl)].append(interploated_temp)
empty_nested_observable_list_initial_temperature_2[observables_unique.index(wl)].append([self.exp_dict_list_original[i]['simulation'].temperature]*np.shape(interploated_temp)[0])
###################################################################################################################################################################################################################
colors = cm.rainbow(np.linspace(0, 1, 30))  # color cycle used to distinguish experiments
for x,observable in enumerate(empty_nested_observable_list_Y):
length_of_2nd_list = len(empty_nested_observable_list_Y_2[x])
if bool(observable):
plt.figure()
if bool(csv):
df = pd.read_csv(csv)
plt.scatter(df[str(observables_unique[x])+'_Y'].dropna()*-1,df[str(observables_unique[x])+'_time'].dropna()*1e3,alpha=0.5,color='k',zorder=4)
for y,array in enumerate(empty_nested_observable_list_Y[x]):
plt.subplot(1,1,1)
plt.xlabel('Y')
plt.ylabel('Time')
plt.scatter(empty_nested_observable_list_Y[x][y],empty_nested_observable_list_time[x][y],label='Experiment_'+str(x)+'_observable_'+str(y),color='blue')
if y<length_of_2nd_list:
plt.scatter(empty_nested_observable_list_Y_2[x][y],empty_nested_observable_list_time_2[x][y],color='red',zorder=4)
#plt.legend(ncol=2,bbox_to_anchor=(1, 0.5))
plt.title(observables_unique[x])
# plt.subplot(2,1,2)
# plt.scatter(empty_nested_observable_list_y[x][y],empty_nested_observable_list_time[x][y],color=colors[x])
# plt.xlabel('y')
# plt.ylabel('Time')
# plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=.5, hspace=.5)
#plt.savefig(directory_to_save_images+'/'+str(observables_unique[x])+'_Including Experiments_'+str(experiments_want_to_plot_data_from)+'_Yy_vs_time.pdf',dpi=1000,bbox_inches='tight')
for x,observable in enumerate(empty_nested_observable_list_Y):
length_of_2nd_list = len(empty_nested_observable_list_Y_2[x])
if bool(observable):
plt.figure()
if bool(csv):
df = pd.read_csv(csv)
plt.scatter(df[str(observables_unique[x])+'_Y'].dropna()*-1,df[str(observables_unique[x])+'_Temperature'].dropna(),alpha=0.5,color='k',zorder=4)
for y,array in enumerate(empty_nested_observable_list_Y[x]):
plt.subplot(1,1,1)
plt.scatter(empty_nested_observable_list_Y[x][y],empty_nested_observable_list_temperature[x][y],label='Experiment_'+str(x)+'_observable_'+str(y),color='blue')
#plt.legend(ncol=2,bbox_to_anchor=(1, 0.5))
plt.xlabel('Y')
plt.ylabel('Temperature')
plt.title(observables_unique[x])
if y<length_of_2nd_list:
plt.scatter(empty_nested_observable_list_Y_2[x][y],empty_nested_observable_list_temperature_2[x][y],color='red',zorder=4)
# plt.subplot(2,1,2)
#
# plt.scatter(empty_nested_observable_list_y[x][y],empty_nested_observable_list_temperature[x][y],color=colors[x])
# plt.xlabel('y')
# plt.ylabel('Temperature')
# plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=.5, hspace=.5)
#plt.savefig(directory_to_save_images+'/'+str(observables_unique[x])+'_Including Experiments_'+str(experiments_want_to_plot_data_from)+'_Yy_vs_temperature.pdf',dpi=1000,bbox_inches='tight')
for x,observable in enumerate(empty_nested_observable_list_Y):
length_of_2nd_list = len(empty_nested_observable_list_Y_2[x])
if bool(observable):
plt.figure()
if bool(csv):
df = pd.read_csv(csv)
plt.scatter(df[str(observables_unique[x])+'_Y'].dropna()*-1,df[str(observables_unique[x])+'_initial_Temperature'].dropna(),alpha=0.5,color='k',zorder=4)
for y,array in enumerate(empty_nested_observable_list_Y[x]):
plt.subplot(1,1,1)
plt.scatter(empty_nested_observable_list_Y[x][y],empty_nested_observable_list_initial_temperature[x][y],label='Experiment_'+str(x)+'_observable_'+str(y),color='blue')
#plt.legend(ncol=2,bbox_to_anchor=(1, 0.5))
plt.xlabel('Y')
plt.ylabel('Initial Temperature')
plt.title(observables_unique[x])
if y<length_of_2nd_list:
plt.scatter(empty_nested_observable_list_Y_2[x][y],empty_nested_observable_list_initial_temperature_2[x][y],color='red',zorder=4)
# plt.subplot(2,1,2)
#
# plt.scatter(empty_nested_observable_list_y[x][y],empty_nested_observable_list_initial_temperature[x][y],color=colors[x])
# plt.xlabel('y')
# plt.ylabel('Initial Temperature')
# plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=.5, hspace=.5)
#plt.savefig(directory_to_save_images+'/'+str(observables_unique[x])+'_Including Experiments_'+str(experiments_want_to_plot_data_from)+'_Yy_vs_initial_temperature.pdf',dpi=1000,bbox_inches='tight')
def plotting_T_and_time_full_simulation_individual_observables_for_paper_2(self,experiments_want_to_plot_data_from,
bins='auto',
directory_to_save_images='',csv='',experiments_want_to_plot_data_from_2=[]):
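# Variant of the paper scatter plots that additionally collects z slices for both experiment
# groups and lays the per-observable panels out on two 3-row GridSpec figures.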
#working_here
observables_tottal = []
for i,exp in enumerate(self.exp_dict_list_optimized):
observable_counter=0
single_experiment = []
for j,observable in enumerate(exp['mole_fraction_observables'] + exp['concentration_observables']):
if observable == None:
continue
if observable in exp['mole_fraction_observables']:
single_experiment.append(observable)
observable_counter+=1
if observable in exp['concentration_observables']:
single_experiment.append(observable)
observable_counter+=1
if 'perturbed_coef' in exp.keys():
wavelengths = self.parsed_yaml_list[i]['absorbanceCsvWavelengths']
for k,wl in enumerate(wavelengths):
single_experiment.append(wl)
observables_tottal.append(single_experiment)
observables_flatten = [item for sublist in observables_tottal for item in sublist]
from collections import OrderedDict
observables_unique = list(OrderedDict.fromkeys(observables_flatten))
empty_nested_observable_list_Y = [[] for x in range(len(observables_unique))]
empty_nested_observable_list_y = [[] for x in range(len(observables_unique))]
empty_nested_observable_list_time = [[] for x in range(len(observables_unique))]
empty_nested_observable_list_temperature = [[] for x in range(len(observables_unique))]
empty_nested_observable_list_initial_temperature = [[] for x in range(len(observables_unique))]
empty_nested_observable_list_z = [[] for x in range(len(observables_unique))]
if bool(experiments_want_to_plot_data_from):
start = 0
stop = 0
for x in range(len(self.simulation_lengths_of_experimental_data)):
for y in range(len(self.simulation_lengths_of_experimental_data[x])):
current_observable = observables_tottal[x][y]
stop = self.simulation_lengths_of_experimental_data[x][y] + start
if x in experiments_want_to_plot_data_from:
temp = self.Y_matrix[start:stop,:]
empty_nested_observable_list_Y[observables_unique.index(current_observable)].append(temp)
temp2 = self.y_matrix[start:stop,:]
empty_nested_observable_list_y[observables_unique.index(current_observable)].append(temp2)
temp3 = self.z_matrix[start:stop,:]
empty_nested_observable_list_z[observables_unique.index(current_observable)].append(temp3)
start = start + self.simulation_lengths_of_experimental_data[x][y]
else:
start = start + self.simulation_lengths_of_experimental_data[x][y]
for i,exp in enumerate(self.exp_dict_list_optimized):
observable_counter=0
for j,observable in enumerate(exp['mole_fraction_observables'] + exp['concentration_observables']):
if observable == None:
continue
if i in experiments_want_to_plot_data_from:
if observable in exp['mole_fraction_observables']:
empty_nested_observable_list_time[observables_unique.index(observable)].append(exp['experimental_data'][observable_counter]['Time']*1e3)
interploated_temp = np.interp(exp['experimental_data'][observable_counter]['Time'],exp['simulation'].timeHistories[0]['time'],exp['simulation'].timeHistories[0]['temperature'])
empty_nested_observable_list_temperature[observables_unique.index(observable)].append(interploated_temp)
empty_nested_observable_list_initial_temperature[observables_unique.index(observable)].append([self.exp_dict_list_original[i]['simulation'].temperature]*np.shape(interploated_temp)[0])
observable_counter+=1
if observable in exp['concentration_observables']:
empty_nested_observable_list_time[observables_unique.index(observable)].append(exp['experimental_data'][observable_counter]['Time']*1e3)
interploated_temp = np.interp(exp['experimental_data'][observable_counter]['Time'],exp['simulation'].timeHistories[0]['time'],exp['simulation'].timeHistories[0]['temperature'])
empty_nested_observable_list_temperature[observables_unique.index(observable)].append(interploated_temp)
empty_nested_observable_list_initial_temperature[observables_unique.index(observable)].append([self.exp_dict_list_original[i]['simulation'].temperature]*np.shape(interploated_temp)[0])
observable_counter+=1
if i in experiments_want_to_plot_data_from:
if 'perturbed_coef' in exp.keys():
wavelengths = self.parsed_yaml_list[i]['absorbanceCsvWavelengths']
for k,wl in enumerate(wavelengths):
empty_nested_observable_list_time[observables_unique.index(wl)].append(exp['absorbance_experimental_data'][k]['time']*1e3)
interploated_temp = np.interp(exp['absorbance_experimental_data'][k]['time'],exp['simulation'].timeHistories[0]['time'],exp['simulation'].timeHistories[0]['temperature'])
empty_nested_observable_list_temperature[observables_unique.index(wl)].append(interploated_temp)
empty_nested_observable_list_initial_temperature[observables_unique.index(wl)].append([self.exp_dict_list_original[i]['simulation'].temperature]*np.shape(interploated_temp)[0])
####################################################################################################################################################################################################################
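# Repeat the grouping, including the z slices, for the second set of experiments (*_2 lists).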
empty_nested_observable_list_Y_2 = [[] for x in range(len(observables_unique))]
empty_nested_observable_list_y_2 = [[] for x in range(len(observables_unique))]
empty_nested_observable_list_time_2 = [[] for x in range(len(observables_unique))]
empty_nested_observable_list_temperature_2 = [[] for x in range(len(observables_unique))]
empty_nested_observable_list_initial_temperature_2 = [[] for x in range(len(observables_unique))]
empty_nested_observable_list_z_2 = [[] for x in range(len(observables_unique))]
if bool(experiments_want_to_plot_data_from_2):
start = 0
stop = 0
for x in range(len(self.simulation_lengths_of_experimental_data)):
for y in range(len(self.simulation_lengths_of_experimental_data[x])):
current_observable = observables_tottal[x][y]
stop = self.simulation_lengths_of_experimental_data[x][y] + start
if x in experiments_want_to_plot_data_from_2:
temp = self.Y_matrix[start:stop,:]
empty_nested_observable_list_Y_2[observables_unique.index(current_observable)].append(temp)
temp2 = self.y_matrix[start:stop,:]
empty_nested_observable_list_y_2[observables_unique.index(current_observable)].append(temp2)
temp3 = self.z_matrix[start:stop,:]
empty_nested_observable_list_z_2[observables_unique.index(current_observable)].append(temp3)
start = start + self.simulation_lengths_of_experimental_data[x][y]
else:
start = start + self.simulation_lengths_of_experimental_data[x][y]
for i,exp in enumerate(self.exp_dict_list_optimized):
observable_counter=0
for j,observable in enumerate(exp['mole_fraction_observables'] + exp['concentration_observables']):
if observable == None:
continue
if i in experiments_want_to_plot_data_from_2:
if observable in exp['mole_fraction_observables']:
empty_nested_observable_list_time_2[observables_unique.index(observable)].append(exp['experimental_data'][observable_counter]['Time']*1e3)
interploated_temp = np.interp(exp['experimental_data'][observable_counter]['Time'],exp['simulation'].timeHistories[0]['time'],exp['simulation'].timeHistories[0]['temperature'])
empty_nested_observable_list_temperature_2[observables_unique.index(observable)].append(interploated_temp)
empty_nested_observable_list_initial_temperature_2[observables_unique.index(observable)].append([self.exp_dict_list_original[i]['simulation'].temperature]*np.shape(interploated_temp)[0])
observable_counter+=1
if observable in exp['concentration_observables']:
empty_nested_observable_list_time_2[observables_unique.index(observable)].append(exp['experimental_data'][observable_counter]['Time']*1e3)
interploated_temp = np.interp(exp['experimental_data'][observable_counter]['Time'],exp['simulation'].timeHistories[0]['time'],exp['simulation'].timeHistories[0]['temperature'])
empty_nested_observable_list_temperature_2[observables_unique.index(observable)].append(interploated_temp)
empty_nested_observable_list_initial_temperature_2[observables_unique.index(observable)].append([self.exp_dict_list_original[i]['simulation'].temperature]*np.shape(interploated_temp)[0])
observable_counter+=1
if i in experiments_want_to_plot_data_from_2:
if 'perturbed_coef' in exp.keys():
wavelengths = self.parsed_yaml_list[i]['absorbanceCsvWavelengths']
for k,wl in enumerate(wavelengths):
empty_nested_observable_list_time_2[observables_unique.index(wl)].append(exp['absorbance_experimental_data'][k]['time']*1e3)
interploated_temp = np.interp(exp['absorbance_experimental_data'][k]['time'],exp['simulation'].timeHistories[0]['time'],exp['simulation'].timeHistories[0]['temperature'])
empty_nested_observable_list_temperature_2[observables_unique.index(wl)].append(interploated_temp)
empty_nested_observable_list_initial_temperature_2[observables_unique.index(wl)].append([self.exp_dict_list_original[i]['simulation'].temperature]*np.shape(interploated_temp)[0])
###################################################################################################################################################################################################################
colors = cm.rainbow(np.linspace(0, 1, 30))  # color cycle used to distinguish experiments
import matplotlib.gridspec as gridspec
fig = plt.figure(figsize=(6,7))
gs = gridspec.GridSpec(3, 1,height_ratios=[3,3,3],wspace=0.025,hspace=0.1)
gs.update(wspace=0, hspace=0.7)
ax1=plt.subplot(gs[0])
ax2=plt.subplot(gs[1])
ax3=plt.subplot(gs[2])
fig2 = plt.figure(figsize=(6,7))
gs2 = gridspec.GridSpec(3, 1,height_ratios=[3,3,3],wspace=0.025,hspace=0.1)
gs2.update(wspace=0, hspace=0.7)
ax4=plt.subplot(gs2[0])
ax5=plt.subplot(gs2[1])
ax6=plt.subplot(gs2[2])
for x,observable in enumerate(empty_nested_observable_list_Y):
length_of_2nd_list = len(empty_nested_observable_list_Y_2[x])
if bool(observable):
if x ==0:
if bool(csv):
df = pd.read_csv(csv)
import requests
from bs4 import BeautifulSoup
import pandas as pd
import numpy as np
import os
# current directory
CD = os.path.dirname(os.path.abspath(__file__))
# shipments data filepath.
FILENAME = ''
FILEPATH = os.path.join(CD, 'tmp', FILENAME)
# other shipments data configuration.
ORIGIN_COL = ''
DEST_COL = ''
# zone type either 'Express' or 'Ground'.
ZONE_TYPE = ''
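# NOTE: FILENAME, ORIGIN_COL, DEST_COL, and ZONE_TYPE are placeholders that must be filled
# in before running this script.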
def init():
"""return pd.DataFrame and initial zones list"""
ext = FILENAME.split('.')[-1]
print('input:', FILEPATH)
if ext == 'xlsx' or ext == 'xls':
return pd.read_excel(FILEPATH, sheet_name='Sheet1'), []
elif ext == 'csv':
return pd.read_csv(FILEPATH), []
else:
print('File must be .csv, .xlsx, .xls.')
return pd.DataFrame()
import logging
import configparser
import numpy
from scipy import stats
import pandas
from trueimpactdataset_analysis.data.data_loader import DataLoader
__author__ = 'robodasha'
__email__ = '<EMAIL>'
def load_config():
"""Read config.ini from the working directory and return the parsed ConfigParser."""
config = configparser.ConfigParser()
config.read('config.ini')
return config
def citations_ttest():
"""Pair seminal and survey papers with non-null citation counts, run an independent t-test on their Google Scholar citations, and log the one-tailed p-value."""
config = load_config()
logger = logging.getLogger(__name__)
logger.info('Loading data')
dl = DataLoader(config['trueid']['metadata'])
papers_df = dl.load_papers()
responses_df = dl.load_responses()
logger.info('Done loading data')
nonull_ids = papers_df[
pandas.notnull(papers_df.citations_gs)].index.tolist()
pairs = responses_df.seminal_id.isin(nonull_ids) \
& responses_df.survey_id.isin(nonull_ids)
seminal_ids = responses_df[pairs].seminal_id.tolist()
survey_ids = responses_df[pairs].survey_id.tolist()
citations_seminal = papers_df[
papers_df.index.isin(seminal_ids)].citations_gs.tolist()
citations_survey = papers_df[
papers_df.index.isin(survey_ids)].citations_gs.tolist()
logger.info('Got {} seminal and {} survey citations'.format(
len(citations_seminal), len(citations_survey)))
# stats.ttest_ind performs a two-tailed t-test; to obtain the p-value
# for a one-tailed t-test we need to divide p by 2
result = stats.ttest_ind(citations_seminal, citations_survey)
logger.info('Independent t-test: p={}'.format(result[1]/2))
return
def citations_ttest_discipline():
"""Run the seminal vs. survey citations t-test separately for each research area, skipping the 'Other' category and areas with too few samples, and log the per-area one-tailed p-values."""
config = load_config()
logger = logging.getLogger(__name__)
logger.info('Loading data')
papers_df = DataLoader(config['trueid']['metadata']).load_papers()
logger.info('Done loading data')
logger.info('Converting res. category to numerical labels')
papers_df.citations_gs.tolist()
research_area_category = pandas.factorize(papers_df.research_area)
papers_df['research_area_category'] = research_area_category[0]
logger.info('Preparing data')
citations = numpy.array(papers_df.citations_gs.tolist())
labels = numpy.array(papers_df.seminal.tolist())
groups = numpy.array(papers_df.research_area_category.tolist())
logger.info('Got {} citation values, {} labels, {} group labels'.format(
len(citations), len(labels), len(groups)))
logger.info('Finding \'other\' category')
other_cat = list(research_area_category[1]).index('Other')
logger.info('Will skip {} values'.format(
len(numpy.where(groups == other_cat)[0])))
results = []
skipped_categories = []
for category in numpy.unique(groups):
if category == other_cat:
continue
good_idxs = numpy.where(groups == category)
good_labels = labels[good_idxs]
good_citations = citations[good_idxs]
seminal_citations = good_citations[numpy.where(good_labels == True)]
survey_citations = good_citations[numpy.where(good_labels == False)]
if len(good_idxs[0]) <= 3 \
or len(numpy.where(good_labels == True)[0]) <= 1 \
or len(numpy.where(good_labels == False)[0]) <= 1:
logger.info('Insufficient number of samples for {}'.format(
research_area_category[1][category]))
skipped_categories.append(research_area_category[1][category])
continue
logger.info('Got {} seminal and {} survey citations'.format(
len(seminal_citations), len(survey_citations)))
# stats.ttest_ind performs two tailed t-test, to obtain a p-value
# for one tailed t-test we need to divide p by 2
result = stats.ttest_ind(seminal_citations, survey_citations)
logger.info('Independent t-test: p={}'.format(result[1] / 2))
results.append({
'res_area': research_area_category[1][category],
'p': result[1]/2,
'total': len(good_idxs[0])})
results_df = pandas.DataFrame(results)
results_df.set_index('res_area', inplace=True, drop=True)
logger.info('Results:\n{}'.format(results_df))
logger.info('Total number of samples: {}'.format(sum(results_df.total)))
logger.info('Skipped categories: {}'.format(skipped_categories))
return
def citations_ttest_year():
"""
    Run the seminal vs. survey citation t-test separately for each publication
    year, skipping years with too few samples.

    :return: None, results are written to the log
"""
config = load_config()
logger = logging.getLogger(__name__)
logger.info('Loading data')
papers_df = DataLoader(config['trueid']['metadata']).load_papers()
logger.info('Done loading data')
logger.info('Preparing data')
citations = numpy.array(papers_df.citations_gs.tolist())
labels = numpy.array(papers_df.seminal.tolist())
years = numpy.array(papers_df.year.tolist())
logger.info('Got {} citation values, {} labels, {} years'.format(
len(citations), len(labels), len(years)))
results = []
skipped_categories = []
for year in sorted(numpy.unique(years)):
good_idxs = numpy.where(years == year)
good_labels = labels[good_idxs]
good_citations = citations[good_idxs]
seminal_citations = good_citations[numpy.where(good_labels == True)]
survey_citations = good_citations[numpy.where(good_labels == False)]
if len(good_idxs[0]) <= 3 \
or len(numpy.where(good_labels == True)[0]) <= 1 \
or len(numpy.where(good_labels == False)[0]) <= 1:
logger.info('Insufficient number of samples for {}'.format(year))
skipped_categories.append(year)
continue
logger.info('Got {} seminal and {} survey citations'.format(
len(seminal_citations), len(survey_citations)))
# stats.ttest_ind performs two tailed t-test, to obtain a p-value
# for one tailed t-test we need to divide p by 2
result = stats.ttest_ind(seminal_citations, survey_citations)
logger.info('Independent t-test: p={}'.format(result[1] / 2))
results.append({
'year': year,
'p': result[1]/2,
'total': len(good_idxs[0])})
results_df = pandas.DataFrame(results)
results_df.set_index('year', inplace=True, drop=True)
logger.info('Results:\n{}'.format(results_df))
logger.info('Total number of samples: {}'.format(sum(results_df.total)))
logger.info('Skipped categories: {}'.format(skipped_categories))
return
def readership_ttest():
"""
    Run a one-tailed independent t-test comparing Mendeley readership of
    seminal vs. survey papers; papers missing from Mendeley count as 0 readers.

    :return: None, the p-value is written to the log
"""
config = load_config()
logger = logging.getLogger(__name__)
logger.info('Loading data')
dl = DataLoader(config['trueid']['metadata'])
mendeley_df = dl.load_mendeley_metadata()
responses_df = dl.load_responses()
logger.info('Done loading data')
pairs = pandas.notnull(responses_df.seminal_id) \
& pandas.notnull(responses_df.survey_id)
seminal_ids = responses_df[pairs].seminal_id.tolist()
survey_ids = responses_df[pairs].survey_id.tolist()
seminal_ids_all = responses_df.seminal_id.tolist()
survey_ids_all = responses_df.survey_id.tolist()
logger.info('Found {} seminal and {} survey papers in Mendeley'.format(
sum(mendeley_df.index.isin(seminal_ids_all)),
sum(mendeley_df.index.isin(survey_ids_all))))
readership_seminal = mendeley_df[
mendeley_df.index.isin(seminal_ids)].reader_count.tolist()
readership_survey = mendeley_df[
mendeley_df.index.isin(survey_ids)].reader_count.tolist()
# papers which are missing in mendeley have 0 readers
if len(readership_seminal) < sum(pairs):
readership_seminal.extend(
numpy.zeros(sum(pairs) - len(readership_seminal)))
if len(readership_survey) < sum(pairs):
readership_survey.extend(
numpy.zeros(sum(pairs) - len(readership_survey)))
logger.info('Got {} seminal and {} survey citations'.format(
len(readership_seminal), len(readership_survey)))
# stats.ttest_ind performs two tailed t-test, to obtain a p-value
# for one tailed t-test we need to divide p by 2
result = stats.ttest_ind(readership_seminal, readership_survey)
logger.info('Independent t-test: p={}'.format(result[1] / 2))
return
def readership_ttest_discipline():
"""
    Run the seminal vs. survey Mendeley readership comparison separately for
    each research area.

    :return: None, results are written to the log
"""
config = load_config()
logger = logging.getLogger(__name__)
logger.info('Loading data')
dl = DataLoader(config['trueid']['metadata'])
papers_df = dl.load_papers()
mendeley_df = dl.load_mendeley_metadata()
logger.info('Done loading data')
papers_df['reader_count'] = mendeley_df.reader_count
papers_df.loc[ | pandas.isnull(papers_df.reader_count) | pandas.isnull |
#####################################################################################################
# PARETO was produced under the DOE Produced Water Application for Beneficial Reuse Environmental
# Impact and Treatment Optimization (PARETO), and is copyright (c) 2021 by the software owners: The
# Regents of the University of California, through Lawrence Berkeley National Laboratory, et al. All
# rights reserved.
#
# NOTICE. This Software was developed under funding from the U.S. Department of Energy and the
# U.S. Government consequently retains certain rights. As such, the U.S. Government has been granted
# for itself and others acting on its behalf a paid-up, nonexclusive, irrevocable, worldwide license
# in the Software to reproduce, distribute copies to the public, prepare derivative works, and perform
# publicly and display publicly, and to permit other to do so.
#####################################################################################################
"""
Authors: <NAME>
"""
from pareto.operational_water_management.operational_produced_water_optimization_model import (
ProdTank,
)
from pyomo.environ import Var
import pandas as pd
from enum import Enum
class PrintValues(Enum):
Detailed = 0
Nominal = 1
Essential = 2
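# Example usage (sketch; the file name is just the default assumption):
#   generate_report(model, is_print=[PrintValues.Essential], fname="PARETO_report.xlsx")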
def generate_report(model, is_print=[], fname=None):
# ## Printing model sets, parameters, constraints, variable values ##
printing_list = []
if model.type == "strategic":
if len(is_print) == 0:
printing_list = []
else:
# PrintValues.Detailed: Slacks values included, Same as "All"
if is_print[0].value == 0:
printing_list = [
"v_F_Piped",
"v_F_Trucked",
"v_F_Sourced",
"v_F_PadStorageIn",
"v_F_ReuseDestination",
"v_X_Capacity",
"v_T_Capacity",
"v_F_Capacity",
"v_D_Capacity",
"v_F_DisposalDestination",
"v_F_PadStorageOut",
"v_C_Piped",
"v_C_Trucked",
"v_C_Sourced",
"v_C_Disposal",
"v_C_Reuse",
"v_L_Storage",
"vb_y_Pipeline",
"vb_y_Disposal",
"vb_y_Storage",
"vb_y_Treatment",
"vb_y_FLow",
"v_F_Overview",
"v_S_FracDemand",
"v_S_Production",
"v_S_Flowback",
"v_S_PipelineCapacity",
"v_S_StorageCapacity",
"v_S_DisposalCapacity",
"v_S_TreatmentCapacity",
"v_S_ReuseCapacity",
]
# PrintValues.Nominal: Essential + Trucked water + Piped Water + Sourced water + vb_y_pipeline + vb_y_disposal + vb_y_storage + etc.
elif is_print[0].value == 1:
printing_list = [
"v_F_Piped",
"v_F_Trucked",
"v_F_Sourced",
"v_C_Piped",
"v_C_Trucked",
"v_C_Sourced",
"vb_y_Pipeline",
"vb_y_Disposal",
"vb_y_Storage",
"vb_y_Flow",
"vb_y_Treatment",
"v_F_Overview",
]
# PrintValues.Essential: Just message about slacks, "Check detailed results", Overview, Economics, KPIs
elif is_print[0].value == 2:
printing_list = ["v_F_Overview"]
else:
raise Exception("Report {0} not supported".format(is_print))
headers = {
"v_F_Overview_dict": [("Variable Name", "Documentation", "Total")],
"v_F_Piped_dict": [("Origin", "destination", "Time", "Piped water")],
"v_C_Piped_dict": [("Origin", "Destination", "Time", "Cost piping")],
"v_F_Trucked_dict": [("Origin", "Destination", "Time", "Trucked water")],
"v_C_Trucked_dict": [("Origin", "Destination", "Time", "Cost trucking")],
"v_F_Sourced_dict": [
("Fresh water source", "Completion pad", "Time", "Sourced water")
],
"v_C_Sourced_dict": [
("Fresh water source", "Completion pad", "Time", "Cost sourced water")
],
"v_F_PadStorageIn_dict": [("Completion pad", "Time", "StorageIn")],
"v_F_PadStorageOut_dict": [("Completion pad", "Time", "StorageOut")],
"v_C_Disposal_dict": [("Disposal site", "Time", "Cost of disposal")],
"v_C_Treatment_dict": [("Treatment site", "Time", "Cost of Treatment")],
"v_C_Reuse_dict": [("Completion pad", "Time", "Cost of reuse")],
"v_C_Storage_dict": [("Storage Site", "Time", "Cost of Storage")],
"v_R_Storage_dict": [
("Storage Site", "Time", "Credit of Retrieving Produced Water")
],
"v_L_Storage_dict": [("Storage site", "Time", "Storage Levels")],
"v_L_PadStorage_dict": [("Completion pad", "Time", "Storage Levels")],
"vb_y_Pipeline_dict": [
("Origin", "Destination", "Pipeline Diameter", "Pipeline Installation")
],
"vb_y_Disposal_dict": [("Disposal Site", "Injection Capacity", "Disposal")],
"vb_y_Storage_dict": [
("Storage Site", "Storage Capacity", "Storage Expansion")
],
"vb_y_Flow_dict": [("Origin", "Destination", "Time", "Flow")],
"vb_y_Treatment_dict": [
("Treatment Site", "Treatment Capacity", "Treatment Expansion")
],
"v_D_Capacity_dict": [("Disposal Site", "Disposal Site Capacity")],
"v_T_Capacity_dict": [("Treatment Site", "Treatment Capacity")],
"v_X_Capacity_dict": [("Storage Site", "Storage Site Capacity")],
"v_F_Capacity_dict": [("Origin", "Destination", "Flow Capacity")],
"v_S_FracDemand_dict": [("Completion pad", "Time", "Slack FracDemand")],
"v_S_Production_dict": [("Production pad", "Time", "Slack Production")],
"v_S_Flowback_dict": [("Completion pad", "Time", "Slack Flowback")],
"v_S_PipelineCapacity_dict": [
("Origin", "Destination", "Slack Pipeline Capacity")
],
"v_S_StorageCapacity_dict": [("Storage site", "Slack Storage Capacity")],
"v_S_DisposalCapacity_dict": [("Storage site", "Slack Disposal Capacity")],
"v_S_TreatmentCapacity_dict": [
("Treatment site", "Slack Treatment Capacity")
],
"v_S_ReuseCapacity_dict": [("Reuse site", "Slack Reuse Capacity")],
"v_F_ReuseDestination_dict": [
("Completion Pad", "Time", "Total Deliveries to Completion Pad")
],
"v_F_DisposalDestination_dict": [
("Disposal Site", "Time", "Total Deliveries to Disposal Site")
],
}
# Defining KPIs for strategic model
model.reuse_WaterKPI = Var(doc="Reuse Fraction Produced Water = [%]")
reuseWater_value = (
(model.v_F_TotalReused.value) / (model.p_beta_TotalProd.value) * 100
)
model.reuse_WaterKPI.value = reuseWater_value
model.disposal_WaterKPI = Var(doc="Disposal Fraction Produced Water = [%]")
disposalWater_value = (
(model.v_F_TotalDisposed.value) / (model.p_beta_TotalProd.value) * 100
)
model.disposal_WaterKPI.value = disposalWater_value
model.fresh_CompletionsDemandKPI = Var(
doc="Fresh Fraction Completions Demand = [%]"
)
freshDemand_value = (
(model.v_F_TotalSourced.value) / (model.p_gamma_TotalDemand.value) * 100
)
model.fresh_CompletionsDemandKPI.value = freshDemand_value
model.reuse_CompletionsDemandKPI = Var(
doc="Reuse Fraction Completions Demand = [%]"
)
reuseDemand_value = (
(model.v_F_TotalReused.value) / (model.p_gamma_TotalDemand.value) * 100
)
model.reuse_CompletionsDemandKPI.value = reuseDemand_value
elif model.type == "operational":
if len(is_print) == 0:
printing_list = []
else:
# PrintValues.Detailed: Slacks values included, Same as "All"
if is_print[0].value == 0:
printing_list = [
"v_F_Piped",
"v_F_Trucked",
"v_F_Sourced",
"v_F_PadStorageIn",
"v_L_ProdTank",
"v_F_PadStorageOut",
"v_C_Piped",
"v_C_Trucked",
"v_C_Sourced",
"v_C_Disposal",
"v_C_Reuse",
"v_L_Storage",
"vb_y_Pipeline",
"vb_y_Disposal",
"vb_y_Storage",
"vb_y_Truck",
"v_F_Drain",
"v_B_Production",
"vb_y_FLow",
"v_F_Overview",
"v_L_PadStorage",
"v_C_Treatment",
"v_C_Storage",
"v_R_Storage",
"v_S_FracDemand",
"v_S_Production",
"v_S_Flowback",
"v_S_PipelineCapacity",
"v_S_StorageCapacity",
"v_S_DisposalCapacity",
"v_S_TreatmentCapacity",
"v_S_ReuseCapacity",
"v_D_Capacity",
"v_X_Capacity",
"v_F_Capacity",
]
# PrintValues.Nominal: Essential + Trucked water + Piped Water + Sourced water + vb_y_pipeline + vb_y_disposal + vb_y_storage
elif is_print[0].value == 1:
printing_list = [
"v_F_Piped",
"v_F_Trucked",
"v_F_Sourced",
"v_C_Piped",
"v_C_Trucked",
"v_C_Sourced",
"vb_y_Pipeline",
"vb_y_Disposal",
"vb_y_Storage",
"vb_y_Flow",
"vb_y_Truck",
"v_F_Overview",
]
# PrintValues.Essential: Just message about slacks, "Check detailed results", Overview, Economics, KPIs
elif is_print[0].value == 2:
printing_list = ["v_F_Overview"]
else:
raise Exception("Report {0} not supported".format(is_print))
headers = {
"v_F_Overview_dict": [("Variable Name", "Documentation", "Total")],
"v_F_Piped_dict": [("Origin", "destination", "Time", "Piped water")],
"v_C_Piped_dict": [("Origin", "Destination", "Time", "Cost piping")],
"v_F_Trucked_dict": [("Origin", "Destination", "Time", "Trucked water")],
"v_C_Trucked_dict": [("Origin", "Destination", "Time", "Cost trucking")],
"v_F_Sourced_dict": [
("Fresh water source", "Completion pad", "Time", "Sourced water")
],
"v_C_Sourced_dict": [
("Fresh water source", "Completion pad", "Time", "Cost sourced water")
],
"v_F_PadStorageIn_dict": [("Completion pad", "Time", "StorageIn")],
"v_F_PadStorageOut_dict": [("Completion pad", "Time", "StorageOut")],
"v_C_Disposal_dict": [("Disposal site", "Time", "Cost of disposal")],
"v_C_Treatment_dict": [("Treatment site", "Time", "Cost of Treatment")],
"v_C_Reuse_dict": [("Completion pad", "Time", "Cost of reuse")],
"v_C_Storage_dict": [("Storage Site", "Time", "Cost of Storage")],
"v_R_Storage_dict": [
("Storage Site", "Time", "Credit of Retrieving Produced Water")
],
"v_L_Storage_dict": [("Storage site", "Time", "Storage Levels")],
"v_L_PadStorage_dict": [("Completion pad", "Time", "Storage Levels")],
"vb_y_Pipeline_dict": [
("Origin", "Destination", "Pipeline Diameter", "Pipeline Installation")
],
"vb_y_Disposal_dict": [("Disposal Site", "Injection Capacity", "Disposal")],
"vb_y_Storage_dict": [
("Storage Site", "Storage Capacity", "Storage Expansion")
],
"vb_y_Flow_dict": [("Origin", "Destination", "Time", "Flow")],
"vb_y_Truck_dict": [("Origin", "Destination", "Time", "Truck")],
"v_D_Capacity_dict": [("Disposal Site", "Disposal Site Capacity")],
"v_X_Capacity_dict": [("Storage Site", "Storage Site Capacity")],
"v_F_Capacity_dict": [("Origin", "Destination", "Flow Capacity")],
"v_S_FracDemand_dict": [("Completion pad", "Time", "Slack FracDemand")],
"v_S_Production_dict": [("Production pad", "Time", "Slack Production")],
"v_S_Flowback_dict": [("Completion pad", "Time", "Slack Flowback")],
"v_S_PipelineCapacity_dict": [
("Origin", "Destination", "Slack Pipeline Capacity")
],
"v_S_StorageCapacity_dict": [("Storage site", "Slack Storage Capacity")],
"v_S_DisposalCapacity_dict": [("Storage site", "Slack Disposal Capacity")],
"v_S_TreatmentCapacity_dict": [
("Treatment site", "Slack Treatment Capacity")
],
"v_S_ReuseCapacity_dict": [("Reuse site", "Slack Reuse Capacity")],
"v_F_ReuseDestination_dict": [
("Completion Pad", "Time", "Total Deliveries to Completion Pad")
],
"v_F_DisposalDestination_dict": [
("Disposal Site", "Time", "Total Deliveries to Disposal Site")
],
"v_F_TreatmentDestination_dict": [
("Disposal Site", "Time", "Total Deliveries to Disposal Site")
],
"v_B_Production_dict": [
("Pads", "Time", "Produced Water For Transport From Pad")
],
}
if model.config.production_tanks == ProdTank.equalized:
headers.update(
{"v_L_ProdTank_dict": [("Pads", "Time", "Production Tank Water Level")]}
)
headers.update(
{
"v_F_Drain_dict": [
("Pads", "Time", "Produced Water Drained From Production Tank")
]
}
)
elif model.config.production_tanks == ProdTank.individual:
headers.update(
{
"v_L_ProdTank_dict": [
("Pads", "Tank", "Time", "Production Tank Water Level")
]
}
)
headers.update(
{
"v_F_Drain_dict": [
(
"Pads",
"Tank",
"Time",
"Produced Water Drained From Production Tank",
)
]
}
)
else:
raise Exception(
"Tank Type {0} is not supported".format(model.config.production_tanks)
)
else:
raise Exception("Model type {0} is not supported".format(model.type))
for variable in model.component_objects(Var):
if variable._data is not None:
for i in variable._data:
var_value = variable._data[i].value
if i is None:
headers["v_F_Overview_dict"].append(
(variable.name, variable.doc, var_value)
)
elif i is not None and isinstance(i, str):
i = (i,)
if i is not None and var_value is not None and var_value > 0:
headers[str(variable.name) + "_dict"].append((*i, var_value))
if model.v_C_Slack.value is not None and model.v_C_Slack.value > 0:
print("!!!ATTENTION!!! One or several slack variables have been triggered!")
for i in list(headers.items())[1:]:
dict_name = i[0][: -len("_dict")]
if dict_name in printing_list:
print("\n", "=" * 10, dict_name.upper(), "=" * 10)
print(i[1][0])
for j in i[1][1:]:
print("{0}{1} = {2}".format(dict_name, j[:-1], j[-1]))
# Loop for printing Overview Information
for i in list(headers.items())[:1]:
dict_name = i[0][: -len("_dict")]
if dict_name in printing_list:
print("\n", "=" * 10, dict_name.upper(), "=" * 10)
# print(i[1][1][0])
for j in i[1][1:]:
if not j[0]: # Conditional that checks if a blank line should be added
print()
elif not j[
1
]: # Conditional that checks if the header for a section should be added
print(j[0].upper())
else:
print("{0} = {1}".format(j[1], j[2]))
# Printing warning if "proprietary_data" is True
if len(printing_list) > 0 and model.proprietary_data is True:
print(
"\n**********************************************************************"
)
print(" WARNING: This report contains Proprietary Data ")
print("**********************************************************************")
    # Add a footnote to each dictionary indicating that the report contains Proprietary Data
if model.proprietary_data is True:
for report in headers:
if len(headers[report]) > 1:
headers[report].append(("PROPRIETARY DATA",))
# Creating the Excel report
if fname is None:
fname = "PARETO_report.xlsx"
with | pd.ExcelWriter(fname) | pandas.ExcelWriter |
#!/usr/bin/env python
from argparse import ArgumentParser
import pandas as pd
import json
import os
import urllib
import logging
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)
# Download functions
####################
def get_kegg_module_hierarchy(s):
hier = {}
# First level is 'ko00002'
for d1 in s['children']:
c1 = d1['name']
for d2 in d1['children']:
c2 = d2['name']
for d3 in d2['children']:
c3 = d3['name']
for module in d3['children']:
module_name = module['name']
kos = module['children']
hier[module_name] = {"Module_category1": c1, "Module_category2": c2,
"Module_category3": c3, "KOs": []}
for ko in kos:
ko_name = ko['name']
ko = ko_name.split(" ")[0]
hier[module_name]["KOs"].append(ko)
return hier
def get_kegg_ortholog_hierarchy(s):
hier = {}
# First level is 'ko00001'
for d1 in s['children']:
c1 = d1['name']
for d2 in d1['children']:
c2 = d2['name']
for d3 in d2['children']:
c3 = d3['name']
if not "children" in d3.keys():
continue
for ko in d3['children']:
ko_name = ko['name'].split("\t")[0]
ko_id = ko_name.split(" ")[0]
if "[EC:" in ko_name:
enzymes = ko_name.split("[")[-1].split("]")[0].lstrip("EC:").split(" ")
else:
enzymes = []
d = {"KO_category1": c1, "KO_category2": c2, "pathway": c3, "name": ko_name, "enzymes": enzymes}
try:
hier[ko_id].append(d)
except KeyError:
hier[ko_id] = [d]
return hier
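# Shape of the mapping returned above (values are illustrative placeholders):
#   {"K00001": [{"KO_category1": <level-1 name>, "KO_category2": <level-2 name>,
#                "pathway": <pathway name>, "name": <full KO name line>,
#                "enzymes": [<EC numbers>]}, ...], ...}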
def get_kegg_ortholog_info(outdir):
outdir = outdir.rstrip("/")
if not os.path.exists(outdir):
os.makedirs(outdir)
url = "https://www.genome.jp/kegg-bin/download_htext?htext=ko00001.keg&format=json"
logging.info("Fetching ko00001.keg from www.kegg.jp")
# Download file
tmp_out = os.path.expandvars("$TMPDIR/ko00001.json")
urllib.request.urlretrieve(url, tmp_out)
with open(tmp_out) as fh:
s = json.load(fh)
hier = get_kegg_ortholog_hierarchy(s)
pathways = {}
ec2path = {}
ko_out = "{}/kegg_kos.tsv".format(outdir)
ko2path_out = "{}/kegg_ko2pathways.tsv".format(outdir)
ko2ec_out = "{}/kegg_ko2ec.tsv".format(outdir)
ec2path_out = "{}/kegg_ec2pathways.tsv".format(outdir)
pathways_out = "{}/kegg_pathways.tsv".format(outdir)
for f in [ko_out, ko2path_out, ko2ec_out, ec2path_out, pathways_out]:
logging.info("Writing to {f}".format(f=f))
# Write KEGG Ortholog names, KEGG Ortholog -> Pathway map, and KEGG Ortholog -> Enzyme map
with open(ko_out, 'w') as fh_kos, open(ko2path_out, 'w') as fh_ko2path, open(ko2ec_out, 'w') as fh_ko2ec:
fh_kos.write("ko\tKO_name\n")
for ko_id, l in hier.items():
for i, d in enumerate(l):
if i == 0:
fh_kos.write("{}\t{}\n".format(ko_id, d["name"]))
for enzyme in d["enzymes"]:
fh_ko2ec.write("{}\t{}\n".format(ko_id, enzyme))
fh_ko2path.write("{}\t{}\n".format(ko_id, "map{}".format(d["pathway"].split(" ")[0])))
pathways[d["pathway"]] = {"Pathway_category1": d["KO_category1"],
"Pathway_category2": d["KO_category2"]}
for enzyme in d["enzymes"]:
try:
ec2path[enzyme].append("map{}".format(d["pathway"].split(" ")[0]))
except KeyError:
ec2path[enzyme] = ["map{}".format(d["pathway"].split(" ")[0])]
# Write Pathway information
with open(pathways_out, 'w') as fh_pathways:
fh_pathways.write("{}\t{}\t{}\t{}\n".format("Pathway_id", "Pathway_name", "Pathway_category1",
"Pathway_category2"))
for pathway, d in pathways.items():
pathway_id = pathway.split(" ")[0]
pathway_name = pathway.replace("{} ".format(pathway_id),"")
fh_pathways.write("map{}\t{}\t{}\t{}\n".format(pathway_id, pathway_name, d["Pathway_category1"],
d["Pathway_category2"]))
# Write Enzyme -> Pathway map
with open(ec2path_out, 'w') as fh_ec2path:
for enzyme, l in ec2path.items():
for pathway in set(l):
fh_ec2path.write("{}\t{}\n".format(enzyme, pathway))
def get_kegg_module_info(outdir):
outdir = outdir.rstrip("/")
if not os.path.exists(outdir):
os.makedirs(outdir)
# Process KEGG Module information
logging.info("Fetching ko00002.keg from www.kegg.jp")
url = "https://www.kegg.jp/kegg-bin/download_htext?htext=ko00002.keg&format=json"
tmp_out = os.path.expandvars("$TMPDIR/ko00002.json")
urllib.request.urlretrieve(url, tmp_out)
ko2mod_out = "{}/kegg_ko2modules.tsv".format(outdir)
modules_out = "{}/kegg_modules.tsv".format(outdir)
with open(tmp_out) as fh:
s = json.load(fh)
hier = get_kegg_module_hierarchy(s)
for f in [ko2mod_out, modules_out]:
logging.info("Writing to {f}".format(f=f))
with open(ko2mod_out, 'w') as fh_ko2mod, open(modules_out, 'w') as fh_modules:
fh_modules.write("Module_id\tModule_name\tModule_category1\tModule_category2\tModule_category3\n")
for module_name, d in hier.items():
module_key = module_name.split(" ")[0]
module_name = " ".join(module_name.split(" ")[1:]).lstrip()
fh_modules.write("{}\t{}\t{}\t{}\t{}\n".format(module_key, module_name, d["Module_category1"],
d["Module_category2"], d["Module_category3"]))
for ko in set(d["KOs"]):
fh_ko2mod.write("{}\t{}\n".format(ko,module_key))
# Parse functions
#################
def make_orf2ko_map(df):
orfs = []
kos = []
for i in df.index:
orf = df.loc[i,"orf"]
if df.loc[i,"ko"]==df.loc[i,"ko"]:
for ko in df.loc[i,"ko"].split(","):
kos.append(ko)
orfs.append(orf)
else:
orfs.append(orf)
kos.append(df.loc[i,"ko"])
dataframe = pd.DataFrame({"orf":orfs,"ko":kos})
return dataframe
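# Example (sketch): a row with orf="orf1" and ko="K00001,K00002" is expanded
# into two rows ("orf1", "K00001") and ("orf1", "K00002"); a row whose ko is
# NaN is kept as a single row with ko = NaN.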
def parse_ko_annotations(annotations, dldir, outdir):
logging.info("Reading annotations from {}".format(annotations))
annot = pd.read_csv(annotations, header=None, usecols=[0,2,3,6], names=["orf","evalue","score","ko"], sep="\t")
logging.info("Loading KEGG info files from {}".format(dldir))
ko2ec = pd.read_csv("{}/kegg_ko2ec.tsv".format(dldir), header=None, names=["ko","ec"], index_col=0, sep="\t")
ko2path = pd.read_csv("{}/kegg_ko2pathways.tsv".format(dldir), header=None, names=["ko","pathway"], index_col=0, sep="\t")
ko2module = pd.read_csv("{}/kegg_ko2modules.tsv".format(dldir), index_col=0, header=None, names = ["ko","module"], sep="\t")
kos = pd.read_csv("{}/kegg_kos.tsv".format(dldir), index_col=0, sep="\t")
modules = pd.read_csv("{}/kegg_modules.tsv".format(dldir), index_col=0, sep="\t")
pathways = pd.read_csv("{}/kegg_pathways.tsv".format(dldir), index_col=0, sep="\t")
orftable = make_orf2ko_map(annot)
    # Because each ORF can have multiple enzyme annotations, it might be placed
    # into the same pathway several times; select the first combination for
    # each ORF to avoid redundancy.
# Add KEGG Ortholog names
orf2ko = pd.merge(orftable, kos, left_on="ko", right_index=True)
orf2ko.set_index("orf",inplace=True)
orf2ko.sort_index(inplace=True)
# Map enzymes
orf2ec = | pd.merge(orftable,ko2ec,left_on="ko",right_index=True) | pandas.merge |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
description: Estimate local time
version: 0.0.3
created: 2018-04-30
author: <NAME>
dependencies:
* tidepool-data-env (install using anaconda, see readme for details)
* wikipedia-timezone-aliases-2018-04-28.csv
license: BSD-2-Clause
TODO:
* [] see readme file
"""
# %% REQUIRED LIBRARIES
import pandas as pd
import numpy as np
import os
import sys
from pytz import timezone
from datetime import timedelta
import datetime as dt
import argparse
# %% USER INPUTS
codeDescription = "Estimate local time for each data point in the dataset"
codeVersion = "0.0.3"
parser = argparse.ArgumentParser(description=codeDescription)
parser.add_argument("-i",
"--input-data-file",
dest="inputFilePathAndName",
default="example-csv.csv",
help="csv, xlsx, or json file that contains Tidepool data")
parser.add_argument("--deprecated-timezone-list",
dest="timezoneAliasesFilePathAndName",
default="wikipedia-timezone-aliases-2018-04-28.csv",
help="a .csv file that contains a list of deprecated " +
"timezones and their alias")
parser.add_argument("-o",
"--output-data-path",
dest="outputPath",
default=os.path.join("output",
"dataWithLocalTimeEstimates"),
help="the output where the data is stored")
parser.add_argument("--day-series-output-path",
dest="daySeriesOutputPath",
default=os.path.join("output", "daySeriesData"),
help="optional path to store the contiguous day series" +
"data. If no path is specified, then data is not saved")
args = parser.parse_args()
# %% FUNCTIONS
def filterByDates(df, startDate, endDate):
# filter by qualified start & end date, and sort
df = \
df[(df.time >= startDate) &
(df.time <= (endDate + "T23:59:59"))]
return df
def convertDeprecatedTimezoneToAlias(df, tzAlias):
if "timezone" in df:
uniqueTimezones = df.timezone.unique()
uniqueTimezones = uniqueTimezones[pd.notnull(df.timezone.unique())]
for uniqueTimezone in uniqueTimezones:
alias = tzAlias.loc[tzAlias.tz.str.endswith(uniqueTimezone),
["alias"]].values
if len(alias) == 1:
df.loc[df.timezone == uniqueTimezone, ["timezone"]] = alias
return df
def largeTimezoneOffsetCorrection(df):
while ((df.timezoneOffset > 840).sum() > 0):
df.loc[df.timezoneOffset > 840, ["conversionOffset"]] = \
df.loc[df.timezoneOffset > 840, ["conversionOffset"]] - \
(1440 * 60 * 1000)
df.loc[df.timezoneOffset > 840, ["timezoneOffset"]] = \
df.loc[df.timezoneOffset > 840, ["timezoneOffset"]] - 1440
while ((df.timezoneOffset < -720).sum() > 0):
df.loc[df.timezoneOffset < -720, ["conversionOffset"]] = \
df.loc[df.timezoneOffset < -720, ["conversionOffset"]] + \
(1440 * 60 * 1000)
df.loc[df.timezoneOffset < -720, ["timezoneOffset"]] = \
df.loc[df.timezoneOffset < -720, ["timezoneOffset"]] + 1440
return df
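# Example of the correction above (sketch): a record with timezoneOffset = 900
# minutes lies outside the valid -720..840 range, so it becomes 900 - 1440 = -540
# and its conversionOffset is reduced by one day (1440 * 60 * 1000 ms) so that
# the underlying UTC time stays the same.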
def createContiguousDaySeries(df):
firstDay = df.date.min()
lastDay = df.date.max()
rng = pd.date_range(firstDay, lastDay).date
contiguousDaySeries = \
pd.DataFrame(rng, columns=["date"]).sort_values(
"date", ascending=False).reset_index(drop=True)
return contiguousDaySeries
def getAndPreprocessUploadRecords(df):
# first make sure deviceTag is in string format
df["deviceTags"] = df.deviceTags.astype(str)
# filter by type upload
ud = df[df.type == "upload"].copy()
# define a device type (e.g., pump, cgm, or healthkit)
ud["deviceType"] = np.nan
ud.loc[ud.deviceTags.str.contains("pump"), ["deviceType"]] = "pump"
# this is for non-healthkit cgm records only
ud.loc[((ud.deviceTags.str.contains("cgm")) &
(ud.timeProcessing != "none")), ["deviceType"]] = "cgm"
ud.loc[((ud.deviceTags.str.contains("cgm")) &
(ud.timeProcessing == "none")), ["deviceType"]] = "healthkit"
return ud
def getAndPreprocessNonDexApiCgmRecords(df):
# non-healthkit cgm and exclude dexcom-api data
if "payload" in df:
# convert payloads to strings
df["isDexcomAPI"] = df.payload.astype(str).str.contains("systemTime")
cd = df[(df.type == "cbg") &
(df.timezoneOffset.notnull()) &
(~df.isDexcomAPI.fillna(False))].copy()
else:
cd = df[(df.type == "cbg") & (df.timezoneOffset.notnull())]
return cd
def getTimezoneOffset(currentDate, currentTimezone):
tz = timezone(currentTimezone)
# here we add 1 day to the current date to account for changes to/from DST
tzoNum = int(tz.localize(currentDate + timedelta(days=1)).strftime("%z"))
tzoHours = np.floor(tzoNum / 100)
tzoMinutes = round((tzoNum / 100 - tzoHours) * 100, 0)
tzoSign = np.sign(tzoHours)
tzo = int((tzoHours * 60) + (tzoMinutes * tzoSign))
return tzo
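# Example (sketch, offsets as reported by pytz):
#   getTimezoneOffset(pd.to_datetime("2018-01-15"), "US/Pacific") -> -480 (UTC-8, PST)
#   getTimezoneOffset(pd.to_datetime("2018-07-15"), "US/Pacific") -> -420 (UTC-7, PDT)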
def getTzoForDateTime(currentDateTime, currentTimezone):
tz = timezone(currentTimezone)
tzoNum = int(tz.localize(pd.to_datetime(currentDateTime)).strftime("%z"))
tzoHours = np.floor(tzoNum / 100)
tzoMinutes = round((tzoNum / 100 - tzoHours) * 100, 0)
tzoSign = np.sign(tzoHours)
tzo = int((tzoHours * 60) + (tzoMinutes * tzoSign))
return tzo
def isDSTChangeDay(currentDate, currentTimezone):
tzoCurrentDay = getTimezoneOffset(pd.to_datetime(currentDate),
currentTimezone)
tzoPreviousDay = getTimezoneOffset(pd.to_datetime(currentDate) +
timedelta(days=-1), currentTimezone)
return (tzoCurrentDay != tzoPreviousDay)
def addAnnotation(df, idx, annotationMessage):
if pd.notnull(df.loc[idx, "est.annotations"]):
df.loc[idx, ["est.annotations"]] = df.loc[idx, "est.annotations"] + \
", " + annotationMessage
else:
df.loc[idx, ["est.annotations"]] = annotationMessage
return df
def addDeviceDaySeries(df, dfContDays, deviceTypeName):
if len(df) > 0:
dfDayGroups = df.groupby("date")
dfDaySeries = pd.DataFrame(dfDayGroups.timezoneOffset.median())
if "upload" in deviceTypeName:
if "timezone" in df:
if dfDayGroups.timezone.count().values[0] > 0:
dfDaySeries["timezone"] = \
dfDayGroups.timezone.describe()["top"]
# get the timezone offset for the timezone
for i in dfDaySeries.index:
if pd.notnull(dfDaySeries.loc[i, "timezone"]):
tzo = getTimezoneOffset(
pd.to_datetime(i),
dfDaySeries.loc[i, "timezone"])
dfDaySeries.loc[i, ["timezoneOffset"]] = tzo
dfDaySeries["timeProcessing"] = \
dfDayGroups.timeProcessing.describe()["top"]
dfDaySeries = dfDaySeries.add_prefix(deviceTypeName + "."). \
rename(columns={deviceTypeName + ".date": "date"})
dfContDays = pd.merge(dfContDays, dfDaySeries.reset_index(),
on="date", how="left")
else:
dfContDays[deviceTypeName + ".timezoneOffset"] = np.nan
return dfContDays
def imputeUploadRecords(df, contDays, deviceTypeName):
daySeries = \
addDeviceDaySeries(df, contDays, deviceTypeName)
if ((len(df) > 0) & (deviceTypeName + ".timezone" in daySeries)):
for i in daySeries.index[1:]:
if pd.isnull(daySeries[deviceTypeName + ".timezone"][i]):
daySeries.loc[i, [deviceTypeName + ".timezone"]] = \
daySeries.loc[i-1, deviceTypeName + ".timezone"]
if pd.notnull(daySeries[deviceTypeName + ".timezone"][i]):
tz = daySeries.loc[i, deviceTypeName + ".timezone"]
tzo = \
getTimezoneOffset(pd.to_datetime(daySeries.loc[i, "date"]),
tz)
daySeries.loc[i, deviceTypeName + ".timezoneOffset"] = tzo
if pd.notnull(daySeries[deviceTypeName + ".timeProcessing"][i-1]):
daySeries.loc[i, deviceTypeName + ".timeProcessing"] = \
daySeries.loc[i-1, deviceTypeName + ".timeProcessing"]
else:
daySeries[deviceTypeName + ".timezone"] = np.nan
daySeries[deviceTypeName + ".timeProcessing"] = np.nan
return daySeries
def estimateTzAndTzoWithUploadRecords(cDF):
cDF["est.type"] = np.nan
cDF["est.gapSize"] = np.nan
cDF["est.timezoneOffset"] = cDF["upload.timezoneOffset"]
cDF["est.annotations"] = np.nan
if "upload.timezone" in cDF:
cDF.loc[cDF["upload.timezone"].notnull(), ["est.type"]] = "UPLOAD"
cDF["est.timezone"] = cDF["upload.timezone"]
cDF["est.timeProcessing"] = cDF["upload.timeProcessing"]
else:
cDF["est.timezone"] = np.nan
cDF["est.timeProcessing"] = np.nan
cDF.loc[((cDF["est.timezoneOffset"] !=
cDF["home.imputed.timezoneOffset"]) &
(pd.notnull(cDF["est.timezoneOffset"]))),
"est.annotations"] = "travel"
return cDF
def estimateTzAndTzoWithDeviceRecords(cDF):
# 2A. use the TZO of the pump or cgm device if it exists on a given day. In
# addition, compare the TZO to one of the imputed day series (i.e., the
# upload and home series to see if the TZ can be inferred)
for deviceType in ["pump", "cgm"]:
# find the indices of days where a TZO estimate has not been made AND
# where the device (e.g., pump or cgm) TZO has data
sIndices = cDF[((cDF["est.timezoneOffset"].isnull()) &
(cDF[deviceType + ".timezoneOffset"].notnull()))].index
# compare the device TZO to the imputed series to infer time zone
cDF = compareDeviceTzoToImputedSeries(cDF, sIndices, deviceType)
# 2B. if the TZ cannot be inferred with 2A, then see if the TZ can be
# inferred from the previous day's TZO. If the device TZO is equal to the
# previous day's TZO, AND if the previous day has a TZ estimate, use the
# previous day's TZ estimate for the current day's TZ estimate
for deviceType in ["pump", "cgm"]:
sIndices = cDF[((cDF["est.timezoneOffset"].isnull()) &
(cDF[deviceType + ".timezoneOffset"].notnull()))].index
cDF = compareDeviceTzoToPrevDayTzo(cDF, sIndices, deviceType)
# 2C. after 2A and 2B, check the DEVICE estimates to make sure that the
# pump and cgm tzo do not differ by more than 60 minutes. If they differ
# by more that 60 minutes, then mark the estimate as UNCERTAIN. Also, we
# allow the estimates to be off by 60 minutes as there are a lot of cases
# where the devices are off because the user changes the time for DST,
# at different times
sIndices = cDF[((cDF["est.type"] == "DEVICE") &
(cDF["pump.timezoneOffset"].notnull()) &
(cDF["cgm.timezoneOffset"].notnull()) &
(cDF["pump.timezoneOffset"] != cDF["cgm.timezoneOffset"])
)].index
tzoDiffGT60 = abs(cDF.loc[sIndices, "cgm.timezoneOffset"] -
cDF.loc[sIndices, "pump.timezoneOffset"]) > 60
idx = tzoDiffGT60.index[tzoDiffGT60]
cDF.loc[idx, ["est.type"]] = "UNCERTAIN"
for i in idx:
cDF = addAnnotation(cDF, i, "pump-cgm-tzo-mismatch")
return cDF
def addHomeTimezone(df, contDays):
if "timezone" in df:
homeTimezone = df["timezone"].describe()["top"]
tzo = contDays.date.apply(
lambda x: getTimezoneOffset(pd.to_datetime(x), homeTimezone))
contDays["home.imputed.timezoneOffset"] = tzo
contDays["home.imputed.timezone"] = homeTimezone
else:
contDays["home.imputed.timezoneOffset"] = np.nan
contDays["home.imputed.timezone"] = np.nan
contDays["home.imputed.timeProcessing"] = np.nan
return contDays
def getRangeOfTZOsForTimezone(tz):
minMaxTzo = [getTimezoneOffset(pd.to_datetime("1/1/2017"), tz),
getTimezoneOffset(pd.to_datetime("5/1/2017"), tz)]
rangeOfTzo = np.arange(int(min(minMaxTzo)), int(max(minMaxTzo))+1, 15)
return rangeOfTzo
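# Example (sketch): for "US/Pacific" the two reference dates yield -480
# (standard time) and -420 (daylight time), so the returned range is
# [-480, -465, -450, -435, -420] in 15-minute steps.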
def tzoRangeWithComparisonTz(df, i, comparisonTz):
    # if we have a comparison time zone, then calculate the range of
    # timezone offset values for that time zone
if pd.notnull(comparisonTz):
rangeTzos = getRangeOfTZOsForTimezone(comparisonTz)
else:
comparisonTz = np.nan
rangeTzos = np.array([])
return rangeTzos
def tzAndTzoRangePreviousDay(df, i):
    # if we have a previous timezone estimate, then calculate the range of
    # timezone offset values for that time zone
comparisonTz = df.loc[i-1, "est.timezone"]
rangeTzos = tzoRangeWithComparisonTz(df, i, comparisonTz)
return comparisonTz, rangeTzos
def tzAndTzoRangeWithHomeTz(df, i):
    # if we have a home timezone estimate, then calculate the range of
    # timezone offset values for that time zone
comparisonTz = df.loc[i, "home.imputed.timezone"]
rangeTzos = tzoRangeWithComparisonTz(df, i, comparisonTz)
return comparisonTz, rangeTzos
def assignTzoFromImputedSeries(df, i, imputedSeries):
df.loc[i, ["est.type"]] = "DEVICE"
df.loc[i, ["est.timezoneOffset"]] = \
df.loc[i, imputedSeries + ".timezoneOffset"]
df.loc[i, ["est.timezone"]] = \
df.loc[i, imputedSeries + ".timezone"]
df.loc[i, ["est.timeProcessing"]] = \
df.loc[i, imputedSeries + ".timeProcessing"]
return df
def compareDeviceTzoToImputedSeries(df, sIdx, device):
for i in sIdx:
# if the device tzo = imputed tzo, then chose the imputed tz and tzo
# note, dst is accounted for in the imputed tzo
for imputedSeries in ["pump.upload.imputed", "cgm.upload.imputed",
"healthkit.upload.imputed", "home.imputed"]:
# if the estimate has not already been made
if pd.isnull(df.loc[i, "est.timezone"]):
if df.loc[i, device + ".timezoneOffset"] == \
df.loc[i, imputedSeries + ".timezoneOffset"]:
assignTzoFromImputedSeries(df, i, imputedSeries)
df = addAnnotation(df, i,
"tz-inferred-from-" + imputedSeries)
# if the imputed series has a timezone estimate, then see if
# the current day is a dst change day
elif (pd.notnull(df.loc[i, imputedSeries + ".timezone"])):
imputedTimezone = df.loc[i, imputedSeries + ".timezone"]
if isDSTChangeDay(df.loc[i, "date"], imputedTimezone):
dstRange = getRangeOfTZOsForTimezone(imputedTimezone)
if ((df.loc[i, device + ".timezoneOffset"] in dstRange)
& (df.loc[i, imputedSeries + ".timezoneOffset"] in dstRange)):
assignTzoFromImputedSeries(df, i, imputedSeries)
df = addAnnotation(df, i, "dst-change-day")
df = addAnnotation(
df, i, "tz-inferred-from-" + imputedSeries)
return df
def assignTzoFromPreviousDay(df, i, previousDayTz):
df.loc[i, ["est.type"]] = "DEVICE"
df.loc[i, ["est.timezone"]] = previousDayTz
df.loc[i, ["est.timezoneOffset"]] = \
getTimezoneOffset(pd.to_datetime(df.loc[i, "date"]), previousDayTz)
df.loc[i, ["est.timeProcessing"]] = df.loc[i-1, "est.timeProcessing"]
df = addAnnotation(df, i, "tz-inferred-from-prev-day")
return df
def assignTzoFromDeviceTzo(df, i, device):
df.loc[i, ["est.type"]] = "DEVICE"
df.loc[i, ["est.timezoneOffset"]] = \
df.loc[i, device + ".timezoneOffset"]
df.loc[i, ["est.timeProcessing"]] = \
df.loc[i, device + ".upload.imputed.timeProcessing"]
df = addAnnotation(df, i, "likely-travel")
df = addAnnotation(df, i, "tzo-from-" + device)
return df
def compareDeviceTzoToPrevDayTzo(df, sIdx, device):
for i in sIdx[sIdx > 0]:
# first see if the previous record has a tzo
if (pd.notnull(df.loc[i-1, "est.timezoneOffset"])):
previousDayTz, dstRange = tzAndTzoRangePreviousDay(df, i)
timeDiff = abs((df.loc[i, device + ".timezoneOffset"]) -
df.loc[i-1, "est.timezoneOffset"])
# next see if the previous record has a tz
if (pd.notnull(df.loc[i-1, "est.timezone"])):
if timeDiff == 0:
assignTzoFromPreviousDay(df, i, previousDayTz)
# see if the previous day's tzo and device tzo are within the
# dst range (as that is a common problem with this data)
elif ((df.loc[i, device + ".timezoneOffset"] in dstRange)
& (df.loc[i-1, "est.timezoneOffset"] in dstRange)):
# then see if it is DST change day
if isDSTChangeDay(df.loc[i, "date"], previousDayTz):
df = addAnnotation(df, i, "dst-change-day")
assignTzoFromPreviousDay(df, i, previousDayTz)
# if it is not DST change day, then mark this as uncertain
else:
# also, check to see if the difference between device.
# tzo and prev.tzo is less than the expected dst
# difference. There is a known issue where the BtUTC
# procedure puts clock drift into the device.tzo,
# and as a result the tzo can be off by 15, 30,
# or 45 minutes.
if (((df.loc[i, device + ".timezoneOffset"] ==
min(dstRange)) |
(df.loc[i, device + ".timezoneOffset"] ==
max(dstRange))) &
((df.loc[i-1, "est.timezoneOffset"] ==
min(dstRange)) |
(df.loc[i-1, "est.timezoneOffset"] ==
max(dstRange)))):
df.loc[i, ["est.type"]] = "UNCERTAIN"
df = addAnnotation(df, i,
"likely-dst-error-OR-travel")
else:
df.loc[i, ["est.type"]] = "UNCERTAIN"
df = addAnnotation(df, i,
"likely-15-min-dst-error")
# next see if time difference between device.tzo and prev.tzo
# is off by 720 minutes, which is indicative of a common
# user AM/PM error
elif timeDiff == 720:
df.loc[i, ["est.type"]] = "UNCERTAIN"
df = addAnnotation(df, i, "likely-AM-PM-error")
# if it doesn't fall into any of these cases, then the
# tzo difference is likely due to travel
else:
df = assignTzoFromDeviceTzo(df, i, device)
elif timeDiff == 0:
df = assignTzoFromDeviceTzo(df, i, device)
# if there is no previous record to compare with check for dst errors,
# and if there are no errors, it is likely a travel day
else:
comparisonTz, dstRange = tzAndTzoRangeWithHomeTz(df, i)
timeDiff = abs((df.loc[i, device + ".timezoneOffset"]) -
df.loc[i, "home.imputed.timezoneOffset"])
if ((df.loc[i, device + ".timezoneOffset"] in dstRange)
& (df.loc[i, "home.imputed.timezoneOffset"] in dstRange)):
# see if it is DST change day
if isDSTChangeDay(df.loc[i, "date"], comparisonTz):
df = addAnnotation(df, i, "dst-change-day")
df.loc[i, ["est.type"]] = "DEVICE"
df.loc[i, ["est.timezoneOffset"]] = \
df.loc[i, device + ".timezoneOffset"]
df.loc[i, ["est.timezone"]] = \
df.loc[i, "home.imputed.timezone"]
df.loc[i, ["est.timeProcessing"]] = \
df.loc[i, device + ".upload.imputed.timeProcessing"]
# if it is not DST change day, then mark this as uncertain
else:
# also, check to see if the difference between device.
# tzo and prev.tzo is less than the expected dst
# difference. There is a known issue where the BtUTC
# procedure puts clock drift into the device.tzo,
# and as a result the tzo can be off by 15, 30,
# or 45 minutes.
if (((df.loc[i, device + ".timezoneOffset"] ==
min(dstRange)) |
(df.loc[i, device + ".timezoneOffset"] ==
max(dstRange))) &
((df.loc[i, "home.imputed.timezoneOffset"] ==
min(dstRange)) |
(df.loc[i, "home.imputed.timezoneOffset"] ==
max(dstRange)))):
df.loc[i, ["est.type"]] = "UNCERTAIN"
df = addAnnotation(df, i, "likely-dst-error-OR-travel")
else:
df.loc[i, ["est.type"]] = "UNCERTAIN"
df = addAnnotation(df, i, "likely-15-min-dst-error")
# next see if time difference between device.tzo and prev.tzo
# is off by 720 minutes, which is indicative of a common
# user AM/PM error
elif timeDiff == 720:
df.loc[i, ["est.type"]] = "UNCERTAIN"
df = addAnnotation(df, i, "likely-AM-PM-error")
# if it doesn't fall into any of these cases, then the
# tzo difference is likely due to travel
else:
df = assignTzoFromDeviceTzo(df, i, device)
return df
def getImputIndices(df, sIdx, hIdx):
lastDayIdx = len(df) - 1
currentDayIdx = sIdx.min()
tempList = pd.Series(hIdx) - currentDayIdx
prevDayIdx = currentDayIdx - 1
nextDayIdx = \
min(currentDayIdx + min(tempList[tempList >= 0]), lastDayIdx)
return currentDayIdx, prevDayIdx, nextDayIdx
def imputeByTimezone(df, currentDay, prevDaywData, nextDaywData):
gapSize = (nextDaywData - currentDay)
if prevDaywData >= 0:
if df.loc[prevDaywData, "est.timezone"] == \
df.loc[nextDaywData, "est.timezone"]:
tz = df.loc[prevDaywData, "est.timezone"]
for i in range(currentDay, nextDaywData):
df.loc[i, ["est.timezone"]] = tz
df.loc[i, ["est.timezoneOffset"]] = \
getTimezoneOffset(pd.to_datetime(df.loc[i, "date"]), tz)
df.loc[i, ["est.type"]] = "IMPUTE"
df = addAnnotation(df, i, "gap=" + str(gapSize))
df.loc[i, ["est.gapSize"]] = gapSize
# TODO: this logic should be updated to handle the edge case
# where the day before and after the gap have differing TZ, but
# the same TZO. In that case the gap should be marked as UNCERTAIN
elif df.loc[prevDaywData, "est.timezoneOffset"] == \
df.loc[nextDaywData, "est.timezoneOffset"]:
for i in range(currentDay, nextDaywData):
df.loc[i, ["est.timezoneOffset"]] = \
df.loc[prevDaywData, "est.timezoneOffset"]
df.loc[i, ["est.type"]] = "IMPUTE"
df = addAnnotation(df, i, "gap=" + str(gapSize))
df.loc[i, ["est.gapSize"]] = gapSize
else:
for i in range(currentDay, nextDaywData):
df.loc[i, ["est.type"]] = "UNCERTAIN"
df = addAnnotation(df, i, "unable-to-impute-tzo")
else:
for i in range(currentDay, nextDaywData):
df.loc[i, ["est.type"]] = "UNCERTAIN"
df = addAnnotation(df, i, "unable-to-impute-tzo")
return df
def imputeTzAndTzo(cDF):
sIndices = cDF[cDF["est.timezoneOffset"].isnull()].index
hasTzoIndices = cDF[cDF["est.timezoneOffset"].notnull()].index
if len(hasTzoIndices) > 0:
if len(sIndices) > 0:
lastDay = max(sIndices)
while ((sIndices.min() < max(hasTzoIndices)) &
(len(sIndices) > 0)):
currentDay, prevDayWithDay, nextDayIdx = \
getImputIndices(cDF, sIndices, hasTzoIndices)
cDF = imputeByTimezone(cDF, currentDay,
prevDayWithDay, nextDayIdx)
sIndices = cDF[((cDF["est.timezoneOffset"].isnull()) &
(~cDF["est.annotations"].str.contains(
"unable-to-impute-tzo").fillna(False)))].index
hasTzoIndices = cDF[cDF["est.timezoneOffset"].notnull()].index
# try to impute to the last day (earliest day) in the dataset
# if the last record has a timezone that is the home record, then
# impute using the home timezone
if len(sIndices) > 0:
currentDay = min(sIndices)
prevDayWithDay = currentDay - 1
gapSize = lastDay - currentDay
for i in range(currentDay, lastDay + 1):
if cDF.loc[prevDayWithDay, "est.timezoneOffset"] == \
cDF.loc[prevDayWithDay, "home.imputed.timezoneOffset"]:
cDF.loc[i, ["est.type"]] = "IMPUTE"
cDF.loc[i, ["est.timezoneOffset"]] = \
cDF.loc[i, "home.imputed.timezoneOffset"]
cDF.loc[i, ["est.timezone"]] = \
cDF.loc[i, "home.imputed.timezone"]
cDF = addAnnotation(cDF, i, "gap=" + str(gapSize))
cDF.loc[i, ["est.gapSize"]] = gapSize
else:
cDF.loc[i, ["est.type"]] = "UNCERTAIN"
cDF = addAnnotation(cDF, i, "unable-to-impute-tzo")
else:
cDF["est.type"] = "UNCERTAIN"
cDF["est.annotations"] = "unable-to-impute-tzo"
return cDF
def reorderColumns(cDF):
cDF = cDF[["pump.upload.imputed.timezoneOffset",
"pump.upload.imputed.timezone",
"pump.upload.imputed.timeProcessing",
"cgm.upload.imputed.timezoneOffset",
"cgm.upload.imputed.timezone",
"cgm.upload.imputed.timeProcessing",
"healthkit.upload.imputed.timezoneOffset",
"healthkit.upload.imputed.timezone",
"healthkit.upload.imputed.timeProcessing",
"home.imputed.timezoneOffset",
"home.imputed.timezone",
"home.imputed.timeProcessing",
"upload.timezoneOffset",
"upload.timezone",
"upload.timeProcessing",
"cgm.timezoneOffset",
"pump.timezoneOffset",
"date",
"est.type",
"est.timezoneOffset",
"est.timezone",
"est.timeProcessing",
"est.annotations",
"est.gapSize",
"est.version"]]
return cDF
def readXlsxData(xlsxPathAndFileName):
# load xlsx
    df = pd.read_excel(xlsxPathAndFileName, sheet_name=None)
cdf = pd.concat(df.values(), ignore_index=True)
cdf = cdf.set_index('jsonRowIndex')
return cdf
def checkInputFile(inputFile):
if os.path.isfile(inputFile):
if os.stat(inputFile).st_size > 1000:
if inputFile[-4:] == "json":
inputData = pd.read_json(inputFile, orient="records")
fileName = os.path.split(inputFile)[-1][:-5]
elif inputFile[-4:] == "xlsx":
inputData = readXlsxData(inputFile)
fileName = os.path.split(inputFile)[-1][:-5]
elif inputFile[-3:] == "csv":
inputData = | pd.read_csv(inputFile, low_memory=False) | pandas.read_csv |
import sys
import pickle as pkl
# import libraries
import nltk
from nltk.corpus import stopwords
nltk.download(['punkt', 'wordnet', 'stopwords'])
st = set(stopwords.words('english'))
import re
import time
import pandas as pd
import numpy as np
from sqlalchemy import create_engine
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.multioutput import MultiOutputClassifier
from sklearn.model_selection import train_test_split, GridSearchCV
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
from sklearn.metrics import classification_report, accuracy_score
from sklearn.svm import LinearSVC
def load_data(database_filepath):
"""
Loads data from database
Input - database_filepath: filepath to sqlite database
Output - X, y: Pandas DataFrames with Data and labels for training.
"""
# get table name from filepath
table = database_filepath.split('/')[-1].split('.')[0]
engine = create_engine(f'sqlite:///{database_filepath}')
df = pd.read_sql_table(table, engine)
X = df['message']
y = df.drop(['id', 'message', 'original', 'genre'], axis=1)
return X, y
def tokenize(text):
"""
Tokenize with NLTK and removes URLs
Input - text - Single string object with english message
Output - list of lowercase, lemmatized word tokens
"""
# Regex string to match URLs
url_regex = 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
# get list of all urls using regex
detected_urls = re.findall(url_regex, text)
# replace each url in text string with placeholder
for url in detected_urls:
text = text.replace(url, 'urlplaceholder')
    # Remove punctuation and other characters
text = re.sub('[^a-zA-Z0-9]', ' ', text)
tokens = word_tokenize(text) # Tokenize block of text
lemmatizer = WordNetLemmatizer() # Initialize Lemmatizer
clean_tokens = []
for tok in tokens:
# lemmatize, normalize case, and remove leading/trailing white space
clean_tok = lemmatizer.lemmatize(tok).lower().strip()
clean_tokens.append(clean_tok)
#remove stopwords
clean_tokens = [x for x in clean_tokens if x not in list(st)]
clean_tokens = [x for x in clean_tokens if x not in ['said', 'wa', 'ha', 'u', '000']]
return clean_tokens
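# Example (sketch; assumes the NLTK data downloaded above is available):
#   tokenize("cats and dogs http://example.com")
#   -> ['cat', 'dog', 'urlplaceholder']
# (the URL is replaced by a placeholder, tokens are lemmatized and lower-cased,
#  and stopwords are dropped)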
def build_model():
"""
    Builds an sklearn Pipeline with a CountVectorizer, a TF-IDF transformer and a
    multi-output Linear Support Vector Classifier, wrapped in a grid search.
    Input - None
    Output - GridSearchCV object (4-fold cross-validation) over vectorizer and classifier parameters.
"""
pipeline = Pipeline([('vect', CountVectorizer(tokenizer=tokenize)), ('tfidf', TfidfTransformer(use_idf=False)),
('clf', MultiOutputClassifier(LinearSVC()))])
parameters = {
'vect__max_df': (.45, .5, .65),
'vect__ngram_range': ((1, 1), (1, 2)),
'clf__estimator__C': [.45, .5, .65]
}
model = GridSearchCV(pipeline, param_grid=parameters, verbose=3, cv=4)
return model
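# Example usage (sketch; the database path is an assumption):
#   X, y = load_data('data/DisasterResponse.db')
#   X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
#   model = build_model()
#   model.fit(X_train, y_train)
#   print(evaluate_model(model, X_test, y_test))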
def evaluate_model(model, X_test, y_test):
"""
Function to gather basic results for printing to standard out.
Input - Model : trained model object
X_test : Unseen Input features to evaluate model
y_test : Unseen labels to evaluate model
Output - Pandas dataframe with 'precision', 'recall',
'f1-score', 'support', and 'accuracy' for each class
"""
y_pred = model.predict(X_test)
results_df = pd.DataFrame(columns=['precision', 'recall', 'f1-score', 'support', 'accuracy'])
for index, column in enumerate(y_test.columns):
cr_dict = classification_report(y_test[column],
y_pred[:, index],
output_dict=True,
labels=np.unique(y_pred[:, index]))
cr_dict['weighted avg']['accuracy'] = accuracy_score(y_test[column], y_pred[:, index])
results_df = results_df.append(pd.DataFrame(index=[column], data=cr_dict['weighted avg']))
return results_df
def save_model(model, model_filepath):
"""
Saves model as pickle object.
Input - model : model object
model_filepath : filepath destination for output
Output - None, file stored
"""
    with open(model_filepath, 'wb') as f:
        pkl.dump(model, f)
def build_word_freq(X, y):
"""
Builds a csv table with top 20 most frequent words for each target.
To be used in visualization to demonstrate NLP functionality
Input - X message feature associated with the model
y label features associated with the model
Output - keywords.csv stored in 'data' directory
"""
dff = pd.concat([X, y], axis=1)
corpus = []
corpus_names = []
    for col in dff.columns[4:]:
        corpus.append(dff.loc[dff[col] == 1]['message'].str.cat(sep=' '))
        corpus_names.append(col)
vectorizer = Pipeline([('vect', CountVectorizer(tokenizer=tokenize)), ('tfidf', TfidfTransformer())])
vectors = vectorizer.fit_transform(corpus)
names = vectorizer.named_steps['vect'].get_feature_names()
data = vectors.todense().tolist()
# Create a dataframe with the results
keywords_ = | pd.DataFrame(data, columns=names) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 5 19:42:05 2018
@author: <NAME>
"""
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import datetime
def read_chat(file):
"""
Reads in the Chat and creates a df from it
"""
# List to save all the dictionaries
save_list = []
# Loop through the file
with open(file, "r", encoding="utf8") as infile:
for line in infile:
# Split the file in what is presumably a datetime and a message
split_comma = line.split(",")
# Split the thing that should be the datetime into presumably date and time
split_whitespace = split_comma.pop(0).split(" ")
# Determine if the first part of split white is a date by trying to convert it
try:
date = datetime.datetime.strptime(split_whitespace[0], '%d.%m.%Y')
# Skip the line if a value error is raised, as this means that it
# is not a date
except ValueError:
continue
# Dictionary to save the results of the line
            # Stitch the rest of the message together again
message_with_person = " ".join(split_comma)
# Get person and message
person, message = seperate_messsage_person(message_with_person)
# Shorten Person
person = person[:2]
# Exclude entries where the person is empty
if person == "":
continue
# Add a better date describer and make the month better sortable
month = date.month if date.month > 9 else "0" + str(date.month)
month_year = str(date.year) + "/" + str(month)
# Determine what the message consists of
topic = determine_message_topic(message)
# Save the things already known and prepare the other ones
save_dict = {"date": date, "person": person, "topic": topic,
"year/month": month_year}
# Save the topic
save_list.append(save_dict)
return create_dataframe(save_list)
def create_dataframe(save_list):
"""
Creates a dataframe from the collected data
"""
df = pd.DataFrame(save_list)
df.set_index("date", inplace=True)
return df
def seperate_messsage_person(message_with_person):
"""
    Separates the person from the message and returns both
"""
# Split by the first colon
split_colon = message_with_person.split(":")
# Get the person out of it and avoid the first whitespace
person = split_colon.pop(0)[1:]
# Stitch the message together again
message = " ".join(split_colon)
return person, message
def determine_message_topic(message):
"""
Determines whats in the message and returns a string with the name of the
topic.
"""
if "[[" not in message:
return "text"
lookup_dict = {"Document": "file", "Photo": "pic",
"Voice": "voice_msg", "Sticker": "sticker",
"Webpage": "link", "GIF": "gif", "Video": "video",
"Contact": "contact", "Geo": "location"}
    # Get rid of the rest of the message
msg_type = message.split("]]")[0]
# Get rid of leading whitespace
msg_type = msg_type[1:]
# Delete the leading parantheses
msg_type = msg_type.replace("[[", "")
    # Get rid of weird sticker emojis
if "Sticker" in msg_type:
msg_type = msg_type.split(" ")[1]
# Get rid of additonal type information
msg_type = msg_type.split(" ")[0]
# Return the correct type
return lookup_dict[msg_type]
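# Example (sketch): messages keep the leading space left over from
# seperate_messsage_person, e.g.
#   determine_message_topic(" [[Photo]] photo_1.jpg") -> "pic"
#   determine_message_topic(" just some text")        -> "text"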
def plot_whole_timeseries_by_type_person(chat_df):
"""Plots lineplots for all different types and persons"""
grouped = chat_df.groupby(["topic", "person", "year/month"])
amounts_per_month = grouped.size()
persons = chat_df["person"].unique()
topics = chat_df["topic"].unique()
fig, subplots = plt.subplots(nrows=len(topics), sharex=True)
for i, topic in enumerate(topics):
print(topic)
ax = subplots[i]
ax.set_title(topic.title())
for person in persons:
print(person)
# This will fail if the person has never done this kind of message
# Therefore create an empty dataframe for the person
try:
amounts_per_person_topic = pd.DataFrame(amounts_per_month.loc[(topic, person)].sort_index())
            except KeyError:
amounts_per_person_topic = pd.DataFrame(0, index=sorted(chat_df["year/month"].unique()), columns=[0])
# Create an empty dataframe with all dates of the original df,
# So plottin is easier and add all the values from the other df
plot_df = pd.DataFrame(0, index=sorted(chat_df["year/month"].unique()), columns=["count"])
plot_df = | pd.merge(plot_df, amounts_per_person_topic, left_index=True, right_index=True, how="left") | pandas.merge |
import numpy as np
import pandas as pd
import pymongo
from sklearn.preprocessing import StandardScaler
from tensorflow.keras.models import load_model
import os
import glob
import time
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.options import Options
model_1 = load_model('model_2249')
model_2 = load_model('model_5699')
class Pipeline:
'''Provides daily results to the TradingHydro application.
Propagates data from the web, via inference, to a database.
    Takes in a csv document and writes numerous updates to the database.
'''
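    # Illustrative use (a sketch; how the steps are orchestrated day to day is
    # assumed, not shown in this class):
    #   pipe = Pipeline()
    #   pipe.db_health_checking()
    #   pipe.scraping()
    #   pipe.preprocessing()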
def __init__(self, csv_filename=False, no_scraping=False):
'''Initializes Pipeline variables.
Attributes:
            url: str, MongoDB connection string (used by pymongo.MongoClient).
csv_filename: str with the csv filename.
            no_scraping: bool, False by default; True means the scraping step is skipped.
DF_out: pandas DataFrame, csv transition data.
model_1_trans: dict with transition data.
model_2_trans: dict with transition data.
'''
# strings
self.url = 'your-mongodb-api-key'
self.csv_filename = 'csv_filename.csv'
# debugging
if csv_filename:
self.csv_filename = csv_filename
self.no_scraping = no_scraping
# transitions
self.DF_out = None
self.model_1_trans = None
self.model_2_trans = None
def db_health_checking(self):
        '''Checks for mismatches and duplicates in the plot collections.'''
# set database
client = pymongo.MongoClient(self.url)
db = client['powercell']
# name collection vars so they correspond with mongodb col names
plot_1 = db['plot_1']
plot_2 = db['plot_2']
plot_3 = db['plot_3']
plot_4 = db['plot_4']
# find the current data in respective collection
querys = [plot_1.find_one(),
plot_3.find_one(),
plot_4.find_one()]
# clean out mongodb id object
querys_no_id = [{i: query[i] for i in ['dates', 'lineseries']} for query in querys]
# compare lens
for name, query in zip(('plot_1', 'plot_3', 'plot_4'), querys_no_id):
lens = [len(query['dates'])]
            lens = lens + [len(query['lineseries'][i]['points']) for i in range(len(query['lineseries']))]
assert len(set(lens)) == 1, 'Health issue, len mismatch in plot ' + name
return True
def scraping(self):
'''Downloads a csv file from the web to disk.
Returns:
bool, True if procedure is successful.
'''
# PREPARE FOR SCRAPE
        # locate yesterday's csv file in the folder
csvfiles = [file for file in glob.glob('*.csv')]
assert len(csvfiles) == 1, 'Prep for scrape, more or less than one csv on disk.'
# remove csv
os.remove(csvfiles[0])
assert len([file for file in glob.glob('*.csv')]) == 0, 'Remove csv, still csv on disk.'
# SELENIUM
# strings
url = 'http://www.nasdaqomxnordic.com/shares/microsite?Instrument=SSE105121'
user_agent = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 ' \
'(KHTML, like Gecko) Chrome/86.0.4240.75 Safari/537.36'
# options
chrome_options = Options()
chrome_options.add_argument('--user-agent=' + user_agent)
chrome_options.add_argument('--headless')
# download location
download_dir = os.path.dirname(os.path.realpath('__file__'))
prefs = {'download.default_directory' : download_dir}
chrome_options.add_experimental_option('prefs', prefs)
# wait, launch browser and wait
time.sleep(np.random.randint(1, 120))
driver = webdriver.Chrome(options=chrome_options)
driver.implicitly_wait(np.random.randint(3, 15))
# go to page and wait
driver.get(url)
driver.implicitly_wait(np.random.randint(3, 15))
# find showhistory button wait and click
        show_history_class = driver.find_element(By.CLASS_NAME, 'showHistory')
show_history_class.click()
driver.implicitly_wait(np.random.randint(3, 15))
# find, click, download csv and wait
exportExcel_id = driver.find_element(By.ID, 'exportExcel')
exportExcel_id.click()
time.sleep(5)
# POST SCRAPE
# change name on csv file and wait
csvfiles = [file for file in glob.glob('*.csv')]
assert len(csvfiles) == 1, 'Post scrape, more or less than one csv on disk.'
os.rename(csvfiles[0], self.csv_filename)
time.sleep(5)
return True
def preprocessing(self):
        '''Preprocesses new data with respect to the old data and slices off the last row.
Returns:
            date: str with today's date.
            x1_s2: numpy array, x1 part of s2.
            p_t: float, yesterday's price (t-1) used for today's calculations.
            c_: float with today's closing price return.
price: float, powercell raw price for today.
ma_26: float, TA indicator.
em_12: float, TA indicator.
em_26: float, TA indicator.
'''
names = ['date', 'price', 'avg_p', 'bid', 'ask',
'o', 'h', 'l', 'c', 'avgp', 'vol', 'oms', 'num']
# put scraped csv in dataframe and obtain the last row
df_scraped = pd.read_csv(self.csv_filename, sep=';', header=1).iloc[:,:1]
df_scraped[[1, 2]] = pd.read_csv(self.csv_filename, sep=';', header=1).iloc[:,6:8]
df_scraped = pd.concat([df_scraped, pd.read_csv(
self.csv_filename, sep=';', header=1).iloc[:,:-1].drop(
columns=['Date'])], axis=1).iloc[::-1].reset_index().drop(columns='index')
df_scraped.columns = names
scraped_row = df_scraped.iloc[[-1]]
# dataframe (DF) related database (DB) and collection
client = pymongo.MongoClient(self.url)
db_DF = client['DF']
DF = db_DF['DF']
        # fetch yesterday's DF
df_in = db_DF.DF.find_one(sort=[('_id', pymongo.DESCENDING)])
df_in_json = pd.read_json(df_in[list(df_in.keys())[-1]], keep_default_dates=False)
# concatenate yesterdays DF and the scraped row
df = pd.concat([df_in_json, scraped_row], axis=0).reset_index().drop(columns='index')
# store now but update later
self.df_out = | pd.concat([df_in_json, scraped_row], axis=0) | pandas.concat |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/10/23 0023
# @Author : justin.郑 <EMAIL>
# @File : index_toutiao.py
# @Desc    : Toutiao index
import json
import pandas as pd
import requests
from gopup.index.cons import index_toutiao_headers
def toutiao_index(keyword="python", start_date="20201016", end_date="20201022", app_name="toutiao"):
"""
头条指数数据
:param keyword: 关键词
:param start_date: 开始日期
:param end_date: 截止日期
:param app_name: 平台
:return:
datetime 日期
index 指数
"""
# list_keyword = '["%s"]' % keyword
try:
url = "https://trendinsight.oceanengine.com/api/open/index/get_multi_keyword_hot_trend"
data = {
"keyword_list": [keyword],
"start_date": start_date,
"end_date": end_date,
"app_name": app_name
}
res = requests.post(url, json=data, headers=index_toutiao_headers)
hot_list = json.loads(res.text)['data']['hot_list'][0]['hot_list']
df = | pd.DataFrame(hot_list) | pandas.DataFrame |
# Copyright 2019 Proyectos y Sistemas de Mantenimiento SL (eProsima).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""."""
import argparse
import logging
from os import listdir
from os.path import abspath
from os.path import isdir
from os.path import isfile
import numpy as np
import pandas
def directory_type(directory):
"""
Check whether the argument is a directory.
:param directory: The directory path.
:return: The directory path without ending /.
Exit if the directory cannot be found.
"""
if directory.endswith('/'):
directory = directory[:-1]
if not isdir(directory):
logger.error('Cannot find {}'.format(directory))
exit(1)
return directory
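# Illustrative example (hypothetical path): directory_type('results/') returns
# 'results' if that directory exists; otherwise an error is logged and the
# script exits.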
def experiment_type(filename):
"""
Get experiment type of a summary file based on its name.
Get experiment type of a summary file based on its name (as output by
"throughput_process_results.py).
:param filename: The name of the summary file.
:raise: AssertionError if filename is not a string.
:return: The experiment type as a string.
"""
assert(isinstance(filename, str))
exp_type = filename.split('/')[-1].split('.')[-2].split('_')[1:-1]
exp_type = '_'.join(exp_type)
logger.debug('{} is of type {}'.format(filename, exp_type))
return exp_type
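# Illustrative example (hypothetical summary filename, following the naming
# produced by throughput_process_results.py):
#   experiment_type('results/measurements_interprocess_best_effort_summary.csv')
#   returns 'interprocess_best_effort'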
if __name__ == '__main__':
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="""
Script to determine throughput requirements based on a set of
        experiment results. The script takes an <experiment_results>
directory and creates a CSV file with requirements for each payload
and experiment type present. The <experiment_results> directory is
expected to contain directories with experiment results as output
by "throughput_run_experiment.bash", and summaries created with
"throughput_process_results.py". Each requirement is set by the 99
percentile of the results for a given payload and experiment type.
        The script generates requirements for lost samples and subscription
throughput.
"""
)
parser.add_argument(
'-e',
'--experiments_results',
help='The directory containing the results of all the experiments',
required=True
)
parser.add_argument(
'-o',
'--output_file',
help='A file to write the requirements',
required=False,
default='throughput_requirements.csv'
)
parser.add_argument(
'--debug',
action='store_true',
help='Set logging level to debug.'
)
args = parser.parse_args()
# Create a custom logger
logger = logging.getLogger('THROUGHPUT.DETERMINE.REQUIREMENTS')
# Create handlers
c_handler = logging.StreamHandler()
# Create formatters and add it to handlers
c_format = (
'[%(asctime)s][%(filename)s:%(lineno)s][%(funcName)s()]' +
'[%(levelname)s] %(message)s'
)
c_format = logging.Formatter(c_format)
c_handler.setFormatter(c_format)
# Add handlers to the logger
logger.addHandler(c_handler)
# Set log level
if args.debug is True:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
experiments = abspath(directory_type(args.experiments_results))
output_file = abspath(args.output_file)
# Validate arguments
assert(isdir(experiments))
    logger.debug('Getting results directories')
results_dirs = sorted(
[
'{}/{}'.format(
experiments,
d
) for d in listdir(experiments) if isdir(
'{}/{}'.format(
experiments,
d
)
)
]
)
logger.debug(results_dirs)
# Supported experiment types
experiment_types = [
'interprocess_best_effort',
'interprocess_best_effort_shm',
'interprocess_best_effort_security',
'interprocess_best_effort_shm_security',
'interprocess_best_effort_tcp',
'interprocess_best_effort_tcp_security',
'interprocess_reliable',
'interprocess_reliable_shm',
'interprocess_reliable_security',
'interprocess_reliable_shm_security',
'interprocess_reliable_tcp',
'interprocess_reliable_tcp_security',
'intraprocess_best_effort',
'intraprocess_reliable',
]
# Create a dictionary with one entry per experiment type. The values are
# Pandas DataFrames:
# {
# 'experiment_type_1': <Pandas DataFrame>,
# 'experiment_type_2': <Pandas DataFrame>
# }
# The DataFrames contain a "Experiment" column to keep track of from which
# experiment does every data entry come from.
data_by_exp_type = {}
for results_dir in results_dirs:
# Get path of summary files
        logger.debug('Getting summaries for {}'.format(results_dir))
results_files = sorted(
[
'{}/{}'.format(
results_dir,
f
) for f in listdir(results_dir) if isfile(
'{}/{}'.format(
results_dir,
f
)
) and 'summary' in f
]
)
logger.debug('Summaries: {}'.format(results_files))
# Iterate over the summaries
for f in results_files:
# Get experiment type
exp_type = experiment_type(f)
# Check that supported
if exp_type not in experiment_types:
logger.error(
'Experiment {} found in {} is NOT supported'.format(
exp_type,
results_dir
)
)
exit(1)
# Initialize dictionary entry
if exp_type not in data_by_exp_type:
data_by_exp_type[exp_type] = pandas.DataFrame()
# Load data as DataFrame
logger.debug('Loading data from {}'.format(f))
file_data = pandas.read_csv(f)
            # Expand data with an "Experiment" column containing the directory name
# of experiment results.
file_data['Experiment'] = results_dir.split('/')[-1]
            # Append data to this experiment type's DataFrame
data_by_exp_type[exp_type] = data_by_exp_type[exp_type].append(
file_data,
sort=False
)
logger.debug('Data by experiment type: {}'.format(data_by_exp_type))
# Requirement columns and percentile used to derive requirements
req_columns = {
'Lost [samples]': 99,
'Subscription throughput [Mb/s]': 99,
}
# Derive requirements for each experiment type, payload, and req_column
# based on the percentiles. Store them in a DataFrame in the form:
# Experiment Payload [Bytes] Lost [samples] Subscription throughput [Mb/s]
# 0 interprocess_best_effort_security 16 0.00 8.62310
# 1 interprocess_best_effort_security 1024 144.54 547.46745
requirements = pandas.DataFrame()
for exp_type in data_by_exp_type:
# Get experiment_type data
exp_data = data_by_exp_type[exp_type].reset_index(drop=True)
exp_requirements = pandas.DataFrame()
# Iterate over payloads
payloads = exp_data['Payload [Bytes]'].unique()
for payload in payloads:
# Get payload data
payload_data = exp_data[exp_data['Payload [Bytes]'] == payload]
# Iterate over requirement columns
payload_reqs = pandas.DataFrame()
for c in req_columns:
# Calculate percentile and insert entry
payload_reqs.at[0, c] = np.percentile(
payload_data[c],
req_columns[c]
)
payload_reqs.insert(0, 'Payload [Bytes]', payload)
exp_requirements = exp_requirements.append(payload_reqs)
exp_requirements.insert(0, 'Experiment type', exp_type)
requirements = requirements.append(exp_requirements)
# Save requirements as CSV file
requirements = requirements.reset_index(drop=True)
requirements.to_csv(output_file, float_format='%.3f', index=False)
# Print the requirement
| pandas.set_option('display.max_rows', None) | pandas.set_option |
from datetime import timedelta
from functools import partial
import itertools
from parameterized import parameterized
import numpy as np
from numpy.testing import assert_array_equal, assert_almost_equal
import pandas as pd
from toolz import merge
from zipline.pipeline import SimplePipelineEngine, Pipeline, CustomFactor
from zipline.pipeline.common import (
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
SID_FIELD_NAME,
TS_FIELD_NAME,
)
from zipline.pipeline.data import DataSet
from zipline.pipeline.data import Column
from zipline.pipeline.domain import EquitySessionDomain
from zipline.pipeline.loaders.earnings_estimates import (
NextEarningsEstimatesLoader,
NextSplitAdjustedEarningsEstimatesLoader,
normalize_quarters,
PreviousEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
split_normalized_quarters,
)
from zipline.testing.fixtures import (
WithAdjustmentReader,
WithTradingSessions,
ZiplineTestCase,
)
from zipline.testing.predicates import assert_equal
from zipline.testing.predicates import assert_frame_equal
from zipline.utils.numpy_utils import datetime64ns_dtype
from zipline.utils.numpy_utils import float64_dtype
import pytest
class Estimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate = Column(dtype=float64_dtype)
class MultipleColumnsEstimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate1 = Column(dtype=float64_dtype)
estimate2 = Column(dtype=float64_dtype)
def QuartersEstimates(announcements_out):
class QtrEstimates(Estimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def MultipleColumnsQuartersEstimates(announcements_out):
class QtrEstimates(MultipleColumnsEstimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def QuartersEstimatesNoNumQuartersAttr(num_qtr):
class QtrEstimates(Estimates):
name = Estimates
return QtrEstimates
def create_expected_df_for_factor_compute(start_date, sids, tuples, end_date):
"""
Given a list of tuples of new data we get for each sid on each critical
date (when information changes), create a DataFrame that fills that
data through a date range ending at `end_date`.
"""
df = pd.DataFrame(tuples, columns=[SID_FIELD_NAME, "estimate", "knowledge_date"])
df = df.pivot_table(
columns=SID_FIELD_NAME, values="estimate", index="knowledge_date", dropna=False
)
df = df.reindex(pd.date_range(start_date, end_date))
# Index name is lost during reindex.
df.index = df.index.rename("knowledge_date")
df["at_date"] = end_date.tz_localize("utc")
df = df.set_index(["at_date", df.index.tz_localize("utc")]).ffill()
new_sids = set(sids) - set(df.columns)
df = df.reindex(columns=df.columns.union(new_sids))
return df
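# Illustrative example (values chosen only to show the shape of the result):
#   create_expected_df_for_factor_compute(
#       pd.Timestamp('2015-01-05'),                    # start_date
#       [0, 10],                                       # sids
#       [(0, 100.0, pd.Timestamp('2015-01-05')),
#        (10, 110.0, pd.Timestamp('2015-01-09'))],     # (sid, estimate, knowledge_date)
#       pd.Timestamp('2015-01-12'),                    # end_date
#   )
# returns a frame indexed by (at_date, knowledge_date) with one column per sid,
# forward-filled from each sid's knowledge date through the end date (NaN before
# the first knowledge date).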
class WithEstimates(WithTradingSessions, WithAdjustmentReader):
"""
ZiplineTestCase mixin providing cls.loader and cls.events as class
level fixtures.
Methods
-------
make_loader(events, columns) -> PipelineLoader
Method which returns the loader to be used throughout tests.
events : pd.DataFrame
The raw events to be used as input to the pipeline loader.
columns : dict[str -> str]
The dictionary mapping the names of BoundColumns to the
associated column name in the events DataFrame.
make_columns() -> dict[BoundColumn -> str]
Method which returns a dictionary of BoundColumns mapped to the
associated column names in the raw data.
"""
# Short window defined in order for test to run faster.
START_DATE = pd.Timestamp("2014-12-28", tz="utc")
END_DATE = pd.Timestamp("2015-02-04", tz="utc")
@classmethod
def make_loader(cls, events, columns):
raise NotImplementedError("make_loader")
@classmethod
def make_events(cls):
raise NotImplementedError("make_events")
@classmethod
def get_sids(cls):
return cls.events[SID_FIELD_NAME].unique()
@classmethod
def make_columns(cls):
return {
Estimates.event_date: "event_date",
Estimates.fiscal_quarter: "fiscal_quarter",
Estimates.fiscal_year: "fiscal_year",
Estimates.estimate: "estimate",
}
def make_engine(self, loader=None):
if loader is None:
loader = self.loader
return SimplePipelineEngine(
lambda x: loader,
self.asset_finder,
default_domain=EquitySessionDomain(
self.trading_days,
self.ASSET_FINDER_COUNTRY_CODE,
),
)
@classmethod
def init_class_fixtures(cls):
cls.events = cls.make_events()
cls.ASSET_FINDER_EQUITY_SIDS = cls.get_sids()
cls.ASSET_FINDER_EQUITY_SYMBOLS = [
"s" + str(n) for n in cls.ASSET_FINDER_EQUITY_SIDS
]
# We need to instantiate certain constants needed by supers of
# `WithEstimates` before we call their `init_class_fixtures`.
super(WithEstimates, cls).init_class_fixtures()
cls.columns = cls.make_columns()
# Some tests require `WithAdjustmentReader` to be set up by the time we
# make the loader.
cls.loader = cls.make_loader(
cls.events, {column.name: val for column, val in cls.columns.items()}
)
class WithOneDayPipeline(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_wrong_num_announcements_passed()
Tests that loading with an incorrect quarter number raises an error.
test_no_num_announcements_attr()
Tests that the loader throws an AssertionError if the dataset being
loaded has no `num_announcements` attribute.
"""
@classmethod
def make_columns(cls):
return {
MultipleColumnsEstimates.event_date: "event_date",
MultipleColumnsEstimates.fiscal_quarter: "fiscal_quarter",
MultipleColumnsEstimates.fiscal_year: "fiscal_year",
MultipleColumnsEstimates.estimate1: "estimate1",
MultipleColumnsEstimates.estimate2: "estimate2",
}
@classmethod
def make_events(cls):
return pd.DataFrame(
{
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp("2015-01-01"), pd.Timestamp("2015-01-06")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-10"),
pd.Timestamp("2015-01-20"),
],
"estimate1": [1.0, 2.0],
"estimate2": [3.0, 4.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015],
}
)
@classmethod
def make_expected_out(cls):
raise NotImplementedError("make_expected_out")
@classmethod
def init_class_fixtures(cls):
super(WithOneDayPipeline, cls).init_class_fixtures()
cls.sid0 = cls.asset_finder.retrieve_asset(0)
cls.expected_out = cls.make_expected_out()
def test_load_one_day(self):
# We want to test multiple columns
dataset = MultipleColumnsQuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=pd.Timestamp("2015-01-15", tz="utc"),
end_date=pd.Timestamp("2015-01-15", tz="utc"),
)
assert_frame_equal(results.sort_index(1), self.expected_out.sort_index(1))
class PreviousWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
    Tests loading a single day of data with the previous quarter loader and
    multiple estimate columns.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp("2015-01-10"),
"estimate1": 1.0,
"estimate2": 3.0,
FISCAL_QUARTER_FIELD_NAME: 1.0,
FISCAL_YEAR_FIELD_NAME: 2015.0,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp("2015-01-15", tz="utc"), cls.sid0),)
),
)
class NextWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
    Tests loading a single day of data with the next quarter loader and
    multiple estimate columns.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp("2015-01-20"),
"estimate1": 2.0,
"estimate2": 4.0,
FISCAL_QUARTER_FIELD_NAME: 2.0,
FISCAL_YEAR_FIELD_NAME: 2015.0,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp("2015-01-15", tz="utc"), cls.sid0),)
),
)
dummy_df = pd.DataFrame(
{SID_FIELD_NAME: 0},
columns=[
SID_FIELD_NAME,
TS_FIELD_NAME,
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
"estimate",
],
index=[0],
)
class WithWrongLoaderDefinition(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_wrong_num_announcements_passed()
Tests that loading with an incorrect quarter number raises an error.
test_no_num_announcements_attr()
Tests that the loader throws an AssertionError if the dataset being
loaded has no `num_announcements` attribute.
"""
@classmethod
def make_events(cls):
return dummy_df
def test_wrong_num_announcements_passed(self):
bad_dataset1 = QuartersEstimates(-1)
bad_dataset2 = QuartersEstimates(-2)
good_dataset = QuartersEstimates(1)
engine = self.make_engine()
columns = {
c.name + str(dataset.num_announcements): c.latest
for dataset in (bad_dataset1, bad_dataset2, good_dataset)
for c in dataset.columns
}
p = Pipeline(columns)
err_msg = (
r"Passed invalid number of quarters -[0-9],-[0-9]; "
r"must pass a number of quarters >= 0"
)
with pytest.raises(ValueError, match=err_msg):
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
def test_no_num_announcements_attr(self):
dataset = QuartersEstimatesNoNumQuartersAttr(1)
engine = self.make_engine()
p = Pipeline({c.name: c.latest for c in dataset.columns})
with pytest.raises(AttributeError):
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
class PreviousWithWrongNumQuarters(WithWrongLoaderDefinition, ZiplineTestCase):
"""
Tests that previous quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
class NextWithWrongNumQuarters(WithWrongLoaderDefinition, ZiplineTestCase):
"""
Tests that next quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
options = [
"split_adjustments_loader",
"split_adjusted_column_names",
"split_adjusted_asof",
]
class WrongSplitsLoaderDefinition(WithEstimates, ZiplineTestCase):
"""
Test class that tests that loaders break correctly when incorrectly
instantiated.
Tests
-----
test_extra_splits_columns_passed(SplitAdjustedEstimatesLoader)
A test that checks that the loader correctly breaks when an
unexpected column is passed in the list of split-adjusted columns.
"""
@classmethod
def init_class_fixtures(cls):
super(WithEstimates, cls).init_class_fixtures()
@parameterized.expand(
itertools.product(
(
NextSplitAdjustedEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
),
)
)
def test_extra_splits_columns_passed(self, loader):
columns = {
Estimates.event_date: "event_date",
Estimates.fiscal_quarter: "fiscal_quarter",
Estimates.fiscal_year: "fiscal_year",
Estimates.estimate: "estimate",
}
with pytest.raises(ValueError):
loader(
dummy_df,
{column.name: val for column, val in columns.items()},
split_adjustments_loader=self.adjustment_reader,
split_adjusted_column_names=["estimate", "extra_col"],
split_adjusted_asof=pd.Timestamp("2015-01-01"),
)
class WithEstimatesTimeZero(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
cls.events : pd.DataFrame
Generated dynamically in order to test inter-leavings of estimates and
event dates for multiple quarters to make sure that we select the
right immediate 'next' or 'previous' quarter relative to each date -
i.e., the right 'time zero' on the timeline. We care about selecting
the right 'time zero' because we use that to calculate which quarter's
data needs to be returned for each day.
Methods
-------
get_expected_estimate(q1_knowledge,
q2_knowledge,
comparable_date) -> pd.DataFrame
Retrieves the expected estimate given the latest knowledge about each
quarter and the date on which the estimate is being requested. If
there is no expected estimate, returns an empty DataFrame.
Tests
------
test_estimates()
Tests that we get the right 'time zero' value on each day for each
sid and for each column.
"""
# Shorter date range for performance
END_DATE = pd.Timestamp("2015-01-28", tz="utc")
q1_knowledge_dates = [
pd.Timestamp("2015-01-01"),
pd.Timestamp("2015-01-04"),
pd.Timestamp("2015-01-07"),
pd.Timestamp("2015-01-11"),
]
q2_knowledge_dates = [
pd.Timestamp("2015-01-14"),
pd.Timestamp("2015-01-17"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-23"),
]
# We want to model the possibility of an estimate predicting a release date
# that doesn't match the actual release. This could be done by dynamically
# generating more combinations with different release dates, but that
# significantly increases the amount of time it takes to run the tests.
# These hard-coded cases are sufficient to know that we can update our
# beliefs when we get new information.
q1_release_dates = [
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-14"),
] # One day late
q2_release_dates = [
pd.Timestamp("2015-01-25"), # One day early
pd.Timestamp("2015-01-26"),
]
@classmethod
def make_events(cls):
"""
In order to determine which estimate we care about for a particular
sid, we need to look at all estimates that we have for that sid and
their associated event dates.
We define q1 < q2, and thus event1 < event2 since event1 occurs
during q1 and event2 occurs during q2 and we assume that there can
only be 1 event per quarter. We assume that there can be multiple
estimates per quarter leading up to the event. We assume that estimates
will not surpass the relevant event date. We will look at 2 estimates
for an event before the event occurs, since that is the simplest
scenario that covers the interesting edge cases:
- estimate values changing
- a release date changing
- estimates for different quarters interleaving
Thus, we generate all possible inter-leavings of 2 estimates per
quarter-event where estimate1 < estimate2 and all estimates are < the
relevant event and assign each of these inter-leavings to a
different sid.
"""
sid_estimates = []
sid_releases = []
# We want all permutations of 2 knowledge dates per quarter.
it = enumerate(
itertools.permutations(cls.q1_knowledge_dates + cls.q2_knowledge_dates, 4)
)
for sid, (q1e1, q1e2, q2e1, q2e2) in it:
# We're assuming that estimates must come before the relevant
# release.
if (
q1e1 < q1e2
and q2e1 < q2e2
# All estimates are < Q2's event, so just constrain Q1
# estimates.
and q1e1 < cls.q1_release_dates[0]
and q1e2 < cls.q1_release_dates[0]
):
sid_estimates.append(
cls.create_estimates_df(q1e1, q1e2, q2e1, q2e2, sid)
)
sid_releases.append(cls.create_releases_df(sid))
return pd.concat(sid_estimates + sid_releases).reset_index(drop=True)
@classmethod
def get_sids(cls):
sids = cls.events[SID_FIELD_NAME].unique()
# Tack on an extra sid to make sure that sids with no data are
# included but have all-null columns.
return list(sids) + [max(sids) + 1]
@classmethod
def create_releases_df(cls, sid):
# Final release dates never change. The quarters have very tight date
# ranges in order to reduce the number of dates we need to iterate
# through when testing.
return pd.DataFrame(
{
TS_FIELD_NAME: [pd.Timestamp("2015-01-13"), pd.Timestamp("2015-01-26")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-26"),
],
"estimate": [0.5, 0.8],
FISCAL_QUARTER_FIELD_NAME: [1.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0],
SID_FIELD_NAME: sid,
}
)
@classmethod
def create_estimates_df(cls, q1e1, q1e2, q2e1, q2e2, sid):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: cls.q1_release_dates + cls.q2_release_dates,
"estimate": [0.1, 0.2, 0.3, 0.4],
FISCAL_QUARTER_FIELD_NAME: [1.0, 1.0, 2.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0, 2015.0, 2015.0],
TS_FIELD_NAME: [q1e1, q1e2, q2e1, q2e2],
SID_FIELD_NAME: sid,
}
)
def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date):
return pd.DataFrame()
def test_estimates(self):
dataset = QuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=self.trading_days[1],
end_date=self.trading_days[-2],
)
for sid in self.ASSET_FINDER_EQUITY_SIDS:
sid_estimates = results.xs(sid, level=1)
# Separate assertion for all-null DataFrame to avoid setting
# column dtypes on `all_expected`.
if sid == max(self.ASSET_FINDER_EQUITY_SIDS):
assert sid_estimates.isnull().all().all()
else:
ts_sorted_estimates = self.events[
self.events[SID_FIELD_NAME] == sid
].sort_values(TS_FIELD_NAME)
q1_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 1
]
q2_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 2
]
all_expected = pd.concat(
[
self.get_expected_estimate(
q1_knowledge[
q1_knowledge[TS_FIELD_NAME] <= date.tz_localize(None)
],
q2_knowledge[
q2_knowledge[TS_FIELD_NAME] <= date.tz_localize(None)
],
date.tz_localize(None),
).set_index([[date]])
for date in sid_estimates.index
],
axis=0,
)
sid_estimates.index = all_expected.index.copy()
assert_equal(all_expected[sid_estimates.columns], sid_estimates)
class NextEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date):
# If our latest knowledge of q1 is that the release is
# happening on this simulation date or later, then that's
# the estimate we want to use.
if (
not q1_knowledge.empty
and q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >= comparable_date
):
return q1_knowledge.iloc[-1:]
# If q1 has already happened or we don't know about it
# yet and our latest knowledge indicates that q2 hasn't
# happened yet, then that's the estimate we want to use.
elif (
not q2_knowledge.empty
and q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >= comparable_date
):
return q2_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns, index=[comparable_date])
class PreviousEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date):
# The expected estimate will be for q2 if the last thing
# we've seen is that the release date already happened.
# Otherwise, it'll be for q1, as long as the release date
# for q1 has already happened.
if (
not q2_knowledge.empty
and q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <= comparable_date
):
return q2_knowledge.iloc[-1:]
elif (
not q1_knowledge.empty
and q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <= comparable_date
):
return q1_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns, index=[comparable_date])
class WithEstimateMultipleQuarters(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events, cls.make_expected_out as
class-level fixtures and self.test_multiple_qtrs_requested as a test.
Attributes
----------
events : pd.DataFrame
Simple DataFrame with estimates for 2 quarters for a single sid.
Methods
-------
make_expected_out() --> pd.DataFrame
Returns the DataFrame that is expected as a result of running a
Pipeline where estimates are requested for multiple quarters out.
fill_expected_out(expected)
Fills the expected DataFrame with data.
Tests
------
test_multiple_qtrs_requested()
Runs a Pipeline that calculate which estimates for multiple quarters
out and checks that the returned columns contain data for the correct
number of quarters out.
"""
@classmethod
def make_events(cls):
return pd.DataFrame(
{
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp("2015-01-01"), pd.Timestamp("2015-01-06")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-10"),
pd.Timestamp("2015-01-20"),
],
"estimate": [1.0, 2.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015],
}
)
@classmethod
def init_class_fixtures(cls):
super(WithEstimateMultipleQuarters, cls).init_class_fixtures()
cls.expected_out = cls.make_expected_out()
@classmethod
def make_expected_out(cls):
expected = pd.DataFrame(
columns=[cls.columns[col] + "1" for col in cls.columns]
+ [cls.columns[col] + "2" for col in cls.columns],
index=cls.trading_days,
)
for (col, raw_name), suffix in itertools.product(
cls.columns.items(), ("1", "2")
):
expected_name = raw_name + suffix
if col.dtype == datetime64ns_dtype:
expected[expected_name] = pd.to_datetime(expected[expected_name])
else:
expected[expected_name] = expected[expected_name].astype(col.dtype)
cls.fill_expected_out(expected)
return expected.reindex(cls.trading_days)
def test_multiple_qtrs_requested(self):
dataset1 = QuartersEstimates(1)
dataset2 = QuartersEstimates(2)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline(
merge(
[
{c.name + "1": c.latest for c in dataset1.columns},
{c.name + "2": c.latest for c in dataset2.columns},
]
)
),
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
q1_columns = [col.name + "1" for col in self.columns]
q2_columns = [col.name + "2" for col in self.columns]
# We now expect a column for 1 quarter out and a column for 2
# quarters out for each of the dataset columns.
assert_equal(
sorted(np.array(q1_columns + q2_columns)), sorted(results.columns.values)
)
assert_equal(
self.expected_out.sort_index(axis=1),
results.xs(0, level=1).sort_index(axis=1),
)
class NextEstimateMultipleQuarters(WithEstimateMultipleQuarters, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected.loc[
pd.Timestamp("2015-01-01", tz="UTC") : pd.Timestamp(
"2015-01-11", tz="UTC"
),
raw_name + "1",
] = cls.events[raw_name].iloc[0]
expected.loc[
pd.Timestamp("2015-01-11", tz="UTC") : pd.Timestamp(
"2015-01-20", tz="UTC"
),
raw_name + "1",
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
# We only have an estimate and event date for 2 quarters out before
# Q1's event happens; after Q1's event, we know 1 Q out but not 2 Qs
# out.
for col_name in ["estimate", "event_date"]:
expected.loc[
pd.Timestamp("2015-01-06", tz="UTC") : pd.Timestamp(
"2015-01-10", tz="UTC"
),
col_name + "2",
] = cls.events[col_name].iloc[1]
# But we know what FQ and FY we'd need in both Q1 and Q2
# because we know which FQ is next and can calculate from there
expected.loc[
pd.Timestamp("2015-01-01", tz="UTC") : pd.Timestamp("2015-01-09", tz="UTC"),
FISCAL_QUARTER_FIELD_NAME + "2",
] = 2
expected.loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC"),
FISCAL_QUARTER_FIELD_NAME + "2",
] = 3
expected.loc[
pd.Timestamp("2015-01-01", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC"),
FISCAL_YEAR_FIELD_NAME + "2",
] = 2015
return expected
class PreviousEstimateMultipleQuarters(WithEstimateMultipleQuarters, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected[raw_name + "1"].loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp(
"2015-01-19", tz="UTC"
)
] = cls.events[raw_name].iloc[0]
expected[raw_name + "1"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
for col_name in ["estimate", "event_date"]:
expected[col_name + "2"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = cls.events[col_name].iloc[0]
expected[FISCAL_QUARTER_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC")
] = 4
expected[FISCAL_YEAR_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC")
] = 2014
expected[FISCAL_QUARTER_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = 1
expected[FISCAL_YEAR_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = 2015
return expected
class WithVaryingNumEstimates(WithEstimates):
"""
ZiplineTestCase mixin providing fixtures and a test to ensure that we
have the correct overwrites when the event date changes. We want to make
sure that if we have a quarter with an event date that gets pushed back,
we don't start overwriting for the next quarter early. Likewise,
if we have a quarter with an event date that gets pushed forward, we want
to make sure that we start applying adjustments at the appropriate, earlier
date, rather than the later date.
Methods
-------
assert_compute()
Defines how to determine that results computed for the `SomeFactor`
factor are correct.
Tests
-----
test_windows_with_varying_num_estimates()
Tests that we create the correct overwrites from 2015-01-13 to
2015-01-14 regardless of how event dates were updated for each
quarter for each sid.
"""
@classmethod
def make_events(cls):
return pd.DataFrame(
{
SID_FIELD_NAME: [0] * 3 + [1] * 3,
TS_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-13"),
]
* 2,
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-20"),
],
"estimate": [11.0, 12.0, 21.0] * 2,
FISCAL_QUARTER_FIELD_NAME: [1, 1, 2] * 2,
FISCAL_YEAR_FIELD_NAME: [2015] * 6,
}
)
@classmethod
def assert_compute(cls, estimate, today):
raise NotImplementedError("assert_compute")
def test_windows_with_varying_num_estimates(self):
dataset = QuartersEstimates(1)
assert_compute = self.assert_compute
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = 3
def compute(self, today, assets, out, estimate):
assert_compute(estimate, today)
engine = self.make_engine()
engine.run_pipeline(
Pipeline({"est": SomeFactor()}),
start_date=pd.Timestamp("2015-01-13", tz="utc"),
# last event date we have
end_date=pd.Timestamp("2015-01-14", tz="utc"),
)
class PreviousVaryingNumEstimates(WithVaryingNumEstimates, ZiplineTestCase):
def assert_compute(self, estimate, today):
if today == pd.Timestamp("2015-01-13", tz="utc"):
assert_array_equal(estimate[:, 0], np.array([np.NaN, np.NaN, 12]))
assert_array_equal(estimate[:, 1], np.array([np.NaN, 12, 12]))
else:
assert_array_equal(estimate[:, 0], np.array([np.NaN, 12, 12]))
assert_array_equal(estimate[:, 1], np.array([12, 12, 12]))
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
class NextVaryingNumEstimates(WithVaryingNumEstimates, ZiplineTestCase):
def assert_compute(self, estimate, today):
if today == pd.Timestamp("2015-01-13", tz="utc"):
assert_array_equal(estimate[:, 0], np.array([11, 12, 12]))
assert_array_equal(estimate[:, 1], np.array([np.NaN, np.NaN, 21]))
else:
assert_array_equal(estimate[:, 0], np.array([np.NaN, 21, 21]))
assert_array_equal(estimate[:, 1], np.array([np.NaN, 21, 21]))
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
class WithEstimateWindows(WithEstimates):
"""
    ZiplineTestCase mixin providing fixtures and a test for running a
Pipeline with an estimates loader over differently-sized windows.
Attributes
----------
events : pd.DataFrame
DataFrame with estimates for 2 quarters for 2 sids.
window_test_start_date : pd.Timestamp
The date from which the window should start.
timelines : dict[int -> pd.DataFrame]
A dictionary mapping to the number of quarters out to
snapshots of how the data should look on each date in the date range.
Methods
-------
make_expected_timelines() -> dict[int -> pd.DataFrame]
Creates a dictionary of expected data. See `timelines`, above.
Tests
-----
test_estimate_windows_at_quarter_boundaries()
Tests that we overwrite values with the correct quarter's estimate at
the correct dates when we have a factor that asks for a window of data.
"""
END_DATE = pd.Timestamp("2015-02-10", tz="utc")
window_test_start_date = pd.Timestamp("2015-01-05")
critical_dates = [
pd.Timestamp("2015-01-09", tz="utc"),
pd.Timestamp("2015-01-15", tz="utc"),
pd.Timestamp("2015-01-20", tz="utc"),
pd.Timestamp("2015-01-26", tz="utc"),
pd.Timestamp("2015-02-05", tz="utc"),
pd.Timestamp("2015-02-10", tz="utc"),
]
# Starting date, number of announcements out.
window_test_cases = list(itertools.product(critical_dates, (1, 2)))
@classmethod
def make_events(cls):
# Typical case: 2 consecutive quarters.
sid_0_timeline = pd.DataFrame(
{
TS_FIELD_NAME: [
cls.window_test_start_date,
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-02-10"),
# We want a case where we get info for a later
# quarter before the current quarter is over but
# after the split_asof_date to make sure that
# we choose the correct date to overwrite until.
pd.Timestamp("2015-01-18"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-02-10"),
pd.Timestamp("2015-02-10"),
pd.Timestamp("2015-04-01"),
],
"estimate": [100.0, 101.0] + [200.0, 201.0] + [400],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2 + [4],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 0,
}
)
# We want a case where we skip a quarter. We never find out about Q2.
sid_10_timeline = pd.DataFrame(
{
TS_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-15"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-22"),
pd.Timestamp("2015-01-22"),
pd.Timestamp("2015-02-05"),
pd.Timestamp("2015-02-05"),
],
"estimate": [110.0, 111.0] + [310.0, 311.0],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [3] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 10,
}
)
# We want to make sure we have correct overwrites when sid quarter
# boundaries collide. This sid's quarter boundaries collide with sid 0.
sid_20_timeline = pd.DataFrame(
{
TS_FIELD_NAME: [
cls.window_test_start_date,
pd.Timestamp("2015-01-07"),
cls.window_test_start_date,
pd.Timestamp("2015-01-17"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-02-10"),
pd.Timestamp("2015-02-10"),
],
"estimate": [120.0, 121.0] + [220.0, 221.0],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 20,
}
)
concatted = pd.concat(
[sid_0_timeline, sid_10_timeline, sid_20_timeline]
).reset_index()
np.random.seed(0)
return concatted.reindex(np.random.permutation(concatted.index))
@classmethod
def get_sids(cls):
sids = sorted(cls.events[SID_FIELD_NAME].unique())
# Add extra sids between sids in our data. We want to test that we
# apply adjustments to the correct sids.
return [
sid for i in range(len(sids) - 1) for sid in range(sids[i], sids[i + 1])
] + [sids[-1]]
@classmethod
def make_expected_timelines(cls):
return {}
@classmethod
def init_class_fixtures(cls):
super(WithEstimateWindows, cls).init_class_fixtures()
cls.create_expected_df_for_factor_compute = partial(
create_expected_df_for_factor_compute,
cls.window_test_start_date,
cls.get_sids(),
)
cls.timelines = cls.make_expected_timelines()
@parameterized.expand(window_test_cases)
def test_estimate_windows_at_quarter_boundaries(
self, start_date, num_announcements_out
):
dataset = QuartersEstimates(num_announcements_out)
trading_days = self.trading_days
timelines = self.timelines
# The window length should be from the starting index back to the first
# date on which we got data. The goal is to ensure that as we
# progress through the timeline, all data we got, starting from that
# first date, is correctly overwritten.
window_len = (
self.trading_days.get_loc(start_date)
- self.trading_days.get_loc(self.window_test_start_date)
+ 1
)
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = window_len
def compute(self, today, assets, out, estimate):
today_idx = trading_days.get_loc(today)
today_timeline = (
timelines[num_announcements_out]
.loc[today]
.reindex(trading_days[: today_idx + 1])
.values
)
timeline_start_idx = len(today_timeline) - window_len
assert_almost_equal(estimate, today_timeline[timeline_start_idx:])
engine = self.make_engine()
engine.run_pipeline(
Pipeline({"est": SomeFactor()}),
start_date=start_date,
# last event date we have
end_date=pd.Timestamp("2015-02-10", tz="utc"),
)
class PreviousEstimateWindows(WithEstimateWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_previous = pd.concat(
[
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-01-19")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-20")),
],
pd.Timestamp("2015-01-20"),
),
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-20")),
],
pd.Timestamp("2015-01-21"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, 111, pd.Timestamp("2015-01-22")),
(20, 121, pd.Timestamp("2015-01-20")),
],
end_date,
)
for end_date in pd.date_range("2015-01-22", "2015-02-04")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, 311, pd.Timestamp("2015-02-05")),
(20, 121, pd.Timestamp("2015-01-20")),
],
end_date,
)
for end_date in pd.date_range("2015-02-05", "2015-02-09")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 201, pd.Timestamp("2015-02-10")),
(10, 311, pd.Timestamp("2015-02-05")),
(20, 221, pd.Timestamp("2015-02-10")),
],
pd.Timestamp("2015-02-10"),
),
]
)
twoq_previous = pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-02-09")
]
# We never get estimates for S1 for 2Q ago because once Q3
# becomes our previous quarter, 2Q ago would be Q2, and we have
# no data on it.
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-02-10")),
(10, np.NaN, pd.Timestamp("2015-02-05")),
(20, 121, pd.Timestamp("2015-02-10")),
],
pd.Timestamp("2015-02-10"),
)
]
)
return {1: oneq_previous, 2: twoq_previous}
class NextEstimateWindows(WithEstimateWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_next = pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp("2015-01-09")),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-07")),
],
pd.Timestamp("2015-01-09"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-07")),
],
end_date,
)
for end_date in pd.date_range("2015-01-12", "2015-01-19")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 100, cls.window_test_start_date),
(0, 101, pd.Timestamp("2015-01-20")),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-07")),
],
pd.Timestamp("2015-01-20"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
end_date,
)
for end_date in pd.date_range("2015-01-21", "2015-01-22")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, 310, pd.Timestamp("2015-01-09")),
(10, 311, pd.Timestamp("2015-01-15")),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
end_date,
)
for end_date in pd.date_range("2015-01-23", "2015-02-05")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
end_date,
)
for end_date in pd.date_range("2015-02-06", "2015-02-09")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(0, 201, pd.Timestamp("2015-02-10")),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
pd.Timestamp("2015-02-10"),
),
]
)
twoq_next = pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-01-11")
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-12", "2015-01-16")
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
pd.Timestamp("2015-01-20"),
)
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-21", "2015-02-10")
]
)
return {1: oneq_next, 2: twoq_next}
class WithSplitAdjustedWindows(WithEstimateWindows):
"""
    ZiplineTestCase mixin providing fixtures and a test for running a
Pipeline with an estimates loader over differently-sized windows and with
split adjustments.
"""
split_adjusted_asof_date = pd.Timestamp("2015-01-14")
@classmethod
def make_events(cls):
# Add an extra sid that has a release before the split-asof-date in
# order to test that we're reversing splits correctly in the previous
# case (without an overwrite) and in the next case (with an overwrite).
sid_30 = pd.DataFrame(
{
TS_FIELD_NAME: [
cls.window_test_start_date,
pd.Timestamp("2015-01-09"),
# For Q2, we want it to start early enough
# that we can have several adjustments before
# the end of the first quarter so that we
# can test un-adjusting & readjusting with an
# overwrite.
cls.window_test_start_date,
# We want the Q2 event date to be enough past
# the split-asof-date that we can have
# several splits and can make sure that they
# are applied correctly.
pd.Timestamp("2015-01-20"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-20"),
],
"estimate": [130.0, 131.0, 230.0, 231.0],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 30,
}
)
# An extra sid to test no splits before the split-adjusted-asof-date.
# We want an event before and after the split-adjusted-asof-date &
# timestamps for data points also before and after
        # split-adjusted-asof-date (but also before the split dates, so that
# we can test that splits actually get applied at the correct times).
sid_40 = pd.DataFrame(
{
TS_FIELD_NAME: [pd.Timestamp("2015-01-09"), pd.Timestamp("2015-01-15")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-02-10"),
],
"estimate": [140.0, 240.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 40,
}
)
# An extra sid to test all splits before the
# split-adjusted-asof-date. All timestamps should be before that date
# so that we have cases where we un-apply and re-apply splits.
sid_50 = pd.DataFrame(
{
TS_FIELD_NAME: [pd.Timestamp("2015-01-09"), | pd.Timestamp("2015-01-12") | pandas.Timestamp |
"""
SIR 3S Logfile Utilities (short: Lx)
"""
__version__='192.168.3.11.dev1'
import os
import sys
import logging
logger = logging.getLogger(__name__)
import argparse
import unittest
import doctest
import nbformat
from nbconvert.preprocessors import ExecutePreprocessor
from nbconvert.preprocessors.execute import CellExecutionError
import timeit
import xml.etree.ElementTree as ET
import re
import struct
import collections
import zipfile
import py7zr
import pandas as pd
import h5py
import subprocess
import csv
import glob
import warnings
#warnings.simplefilter(action='ignore', category=PerformanceWarning)
# pd.set_option("max_rows", None)
# pd.set_option("max_columns", None)
# pd.reset_option('max_rows')
# ...
class LxError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
def fTCCast(x):
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
#logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
v=x
try:
if x in ['true','True']:
v=1
elif x in ['false','False','']:
v=0
else:
try:
v = float(x)
except Exception as e:
#logStrTmp="{:s}{!s:s}: Konvertierung zu float schlaegt fehl! - Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,x,sys.exc_info()[-1].tb_lineno,type(e),str(e))
#logger.debug(logStrTmp)
try:
v = pd.to_numeric(x,errors='raise',downcast='float')
#logStrTmp="{:s}{!s:s}: Konvertierung mit pd.to_numeric liefert: {!s:s}".format(logStr,x,v)
#logger.debug(logStrTmp)
except Exception as e:
#logStrTmp="{:s}{!s:s}: Konvertierung zu float mit pd.to_numeric schlaegt auch fehl! - Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,x,sys.exc_info()[-1].tb_lineno,type(e),str(e))
#logger.debug(logStrTmp)
#x='2021-04-20 10:56:12.000'
#t = pd.Timestamp(x)
#t # Timestamp('2021-04-20 10:56:12')
#i=int(t.to_datetime64())/1000000000
#i # 1618916172.0
#pd.to_datetime(i,unit='s',errors='coerce'): Timestamp('2021-04-20 10:56:12')
try:
t = pd.Timestamp(x)
i=int(t.to_datetime64())/1000000000
v=pd.to_numeric(i,errors='raise',downcast='float')
except Exception as e:
logStrTmp="{:s}{!s:s}: Konvertierung zu float (mit pd.to_numeric) schlaegt (auch nach Annahme vaulue=Zeitstring) fehl! - Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,x,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.debug(logStrTmp)
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
#logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return v
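# Illustrative casts:
#   fTCCast('true')  -> 1
#   fTCCast('')      -> 0
#   fTCCast('3.14')  -> 3.14
#   fTCCast('2021-04-20 10:56:12.000') -> 1618916172.0 (epoch seconds)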
def getTCsOPCDerivative(TCsOPC,col,shiftSize,windowSize,fct=None):
"""
returns a df
index: ProcessTime
cols:
col
dt
dValue
dValueDt
dValueDtRollingMean
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
mDf=pd.DataFrame()
try:
s=TCsOPC[col].dropna()
mDf=pd.DataFrame(s)
dt=mDf.index.to_series().diff(periods=shiftSize)
mDf['dt']=dt
mDf['dValue']=mDf[col].diff(periods=shiftSize)
mDf=mDf.iloc[shiftSize:]
mDf['dValueDt']=mDf.apply(lambda row: row['dValue']/row['dt'].total_seconds(),axis=1)
if fct != None:
mDf['dValueDt']=mDf['dValueDt'].apply(fct)
mDf['dValueDtRollingMean']=mDf['dValueDt'].rolling(window=windowSize).mean()
mDf=mDf.iloc[windowSize-1:]
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return mDf
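# Usage sketch for getTCsOPCDerivative (assumes TCsOPC is a time-indexed TC df, e.g. from
# AppLog.getTCs; the column ID used here is purely illustrative):
#   dfD = getTCsOPCDerivative(TCsOPC, col='Objects.OPC.XYZ_FT_01.In.MW', shiftSize=1, windowSize=60)
#   dfD[['dValueDt','dValueDtRollingMean']].plot()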
logFilenamePattern=r'([0-9]+)(_)+([0-9]+)(\.log)' # group(3) is the postfix/number
logFilenameHeadPattern=r'([0-9,_]+)(\.log)' # group(1) is the head and the H5 key
# not all IDs are captured by the RE pID
# those are post-processed with pID2, getDfFromODIHelper and in getDfFromODI
pID=re.compile('(?P<Prae>IMDI\.)?(?P<A>[a-z,A-Z,0-9,_]+)\.(?P<B>[a-z,A-Z,0-9,_]+)\.(?P<C1>[a-z,A-Z,0-9]+)_(?P<C2>[a-z,A-Z,0-9]+)_(?P<C3>[a-z,A-Z,0-9]+)_(?P<C4>[a-z,A-Z,0-9]+)_(?P<C5>[a-z,A-Z,0-9]+)(?P<C6>_[a-z,A-Z,0-9]+)?(?P<C7>_[a-z,A-Z,0-9]+)?\.(?P<D>[a-z,A-Z,0-9,_]+)\.(?P<E>[a-z,A-Z,0-9,_]+)(?P<Post>\.[a-z,A-Z,0-9,_]+)?')
pID2='(?P<Prae>IMDI\.)?(?P<A>[a-z,A-Z,0-9,_]+)(?P<Post>\.[a-z,A-Z,0-9,_]+)?'
def getDfFromODIHelper(row,col,colCheck,pID2=pID2):
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
#logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
if not pd.isnull(row[colCheck]):
res= row[col]
resStr='ColCheckOk'
elif pd.isnull(row[col]):
res=re.search(pID2,row['ID']).group(col)
if res != None:
resStr='ColNowOk'
else:
resStr='ColStillNotOk'
else:
res = row[col]
resStr='ColWasOk'
except:
res = row[col]
resStr='ERROR'
finally:
if resStr not in ['ColCheckOk','ColNowOk']:
logger.debug("{:s}col: {:s} resStr: {:s} row['ID']: {:s} res: {:s}".format(logStr,col, resStr,row['ID'],str(res)))
#logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return res
def getDfFromODI(ODIFile,pID=pID):
"""
returns a defined df from ODIFile
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
dfID=None
try:
df=pd.read_csv(ODIFile,delimiter=';')
s = pd.Series(df['ID'].unique())
dfID=s.str.extract(pID.pattern,expand=True)
dfID['ID']=s
dfC=dfID['C1']+'_'+dfID['C2']+'_'+dfID['C3']+'_'+dfID['C4']+'_'+dfID['C5']+'_'+dfID['C6']#+'_'+dfID['C7']
dfID.loc[:,'C']=dfC.values
dfID['C']=dfID.apply(lambda row: row['C']+'_'+row['C7'] if not pd.isnull(row['C7']) else row['C'],axis=1)
dfID=dfID[['ID','Prae','A','B','C','C1','C2','C3','C4','C5','C6','C7','D','E','Post']]
for col in ['Prae','Post','A']:
dfID[col]=dfID.apply(lambda row: getDfFromODIHelper(row,col,'A'),axis=1)
dfID.sort_values(by=['ID'], axis=0,ignore_index=True,inplace=True)
dfID.set_index('ID',verify_integrity=True,inplace=True)
dfID.loc['Objects.3S_XYZ_PUMPE.3S_XYZ_GSI_01.Out.EIN','Post']='.EIN'
dfID.loc['Objects.3S_XYZ_PUMPE.3S_XYZ_GSI_01.Out.EIN','A']='Objects'
dfID.loc['Objects.3S_XYZ_PUMPE.3S_XYZ_GSI_01.Out.EIN','B']='3S_XYZ_PUMPE'
dfID.loc['Objects.3S_XYZ_PUMPE.3S_XYZ_GSI_01.Out.EIN','C']='3S_XYZ_GSI_01'
dfID.loc['Objects.3S_XYZ_PUMPE.3S_XYZ_GSI_01.Out.EIN','D']='Out'
#dfID.loc['Objects.3S_XYZ_PUMPE.3S_XYZ_GSI_01.Out.EIN',:]
dfID.loc['Objects.3S_XYZ_RSCHIEBER.3S_XYZ_PCV_01.Out.SOLLW','Post']='.SOLLW'
dfID.loc['Objects.3S_XYZ_RSCHIEBER.3S_XYZ_PCV_01.Out.SOLLW','A']='Objects'
dfID.loc['Objects.3S_XYZ_RSCHIEBER.3S_XYZ_PCV_01.Out.SOLLW','B']='3S_XYZ_RSCHIEBER'
dfID.loc['Objects.3S_XYZ_RSCHIEBER.3S_XYZ_PCV_01.Out.SOLLW','C']='3S_XYZ_PCV_01'
dfID.loc['Objects.3S_XYZ_RSCHIEBER.3S_XYZ_PCV_01.Out.SOLLW','D']='Out'
#dfID.loc['Objects.3S_XYZ_RSCHIEBER.3S_XYZ_PCV_01.Out.SOLLW',:]
dfID['yUnit']=dfID.apply(lambda row: getDfFromODIHelperyUnit(row),axis=1)
dfID['yDesc']=dfID.apply(lambda row: getDfFromODIHelperyDesc(row),axis=1)
dfID=dfID[['yUnit','yDesc','Prae','A','B','C','C1','C2','C3','C4','C5','C6','C7','D','E','Post']]
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return dfID
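# Usage sketch for getDfFromODI (the ODI filename is hypothetical; the file is expected to be
# a ';'-separated csv with an 'ID' column):
#   dfID = getDfFromODI(r'Example.ODI.csv')
#   dfID[['yUnit','yDesc','B','C','D','E']].head()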
def addInitvalueToDfFromODI(INITFile,dfID):
"""
returns dfID extended with new Cols Initvalue and NumOfInits
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
dfIDext=dfID
try:
df=pd.read_csv(INITFile,delimiter=';',header=None,names=['ID','Value'])#,index_col=0)
dfGrped=df.groupby(by=['ID'])['Value'].agg(['count','min','max','mean','last'])
dfIDext=dfID.merge(dfGrped,left_index=True,right_index=True,how='left').filter(items=dfID.columns.to_list()+['last','count']).rename(columns={'last':'Initvalue','count':'NumOfInits'})
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return dfIDext
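# Usage sketch for addInitvalueToDfFromODI (filenames are hypothetical; the INIT file is expected
# to be a ';'-separated csv with ID;Value rows):
#   dfID = getDfFromODI(r'Example.ODI.csv')
#   dfIDext = addInitvalueToDfFromODI(r'Example.INIT.csv', dfID)
#   dfIDext[['Initvalue','NumOfInits']].head()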
def fODIMatch(dfODI,TYPE=None,OBJTYPE=None,NAME1=None,NAME2=None):
df=dfODI
if TYPE != None:
df=df[df['TYPE']==TYPE]
if OBJTYPE != None:
df=df[df['OBJTYPE']==OBJTYPE]
if NAME1 != None:
df=df[df['NAME1']==NAME1]
if NAME2 != None:
df=df[df['NAME2']==NAME2]
return df
def fODIFindAllSchieberSteuerungsIDs(dfODI,NAME1=None,NAME2=None): # dfODI: pd.read_csv(ODI,delimiter=';')
df=fODIMatch(dfODI,TYPE='OL_2',OBJTYPE='VENT',NAME1=NAME1,NAME2=NAME2)
return sorted(list(df['ID'].unique())+[ID for ID in df['REF_ID'].unique() if not pd.isnull(ID)])
def fODIFindAllZeilenWithIDs(dfODI,IDs):
return dfODI[dfODI['ID'].isin(IDs) | dfODI['REF_ID'].isin(IDs)]
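# Usage sketch for the fODI helpers (the NAME1 value is hypothetical; dfODI needs the columns
# TYPE, OBJTYPE, NAME1, NAME2, ID, REF_ID):
#   dfODI = pd.read_csv(r'Example.ODI.csv', delimiter=';')
#   IDs = fODIFindAllSchieberSteuerungsIDs(dfODI, NAME1='XYZ')
#   dfRows = fODIFindAllZeilenWithIDs(dfODI, IDs)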
def getDfFromODIHelperyUnit(row):
"""
returns Unit
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
#logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
unit=None
try:
if row['E'] in ['AL_S','SB_S']:
unit='[-]'
elif row['E'] in ['LR_AV','LP_AV','QD_AV','SD_AV','AM_AV','FZ_AV','MZ_AV','NG_AV']:
unit='[Nm³/h]'
elif row['E'] in ['AC_AV','LR_AV']:
unit='[mm/s²]'
else:
unit='TBD in Lx'
except:
unit='ERROR'
finally:
#logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return unit
def getDfFromODIHelperyDesc(row):
"""
returns Desc
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
#logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
desc=None
try:
if row['E'] in ['AL_S','SB_S']:
desc='Status'
elif row['E'] in ['LR_AV','LP_AV','QD_AV','SD_AV','AM_AV','FZ_AV','MZ_AV','NG_AV']:
desc='Fluss'
elif row['E'] in ['AC_AV','LR_AV']:
desc='Beschleunigung'
else:
desc='TBD in Lx'
except:
desc='ERROR'
finally:
#logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return desc
def getDfIDUniqueCols(dfID):
"""
returns df with uniques
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
#logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
dfIDUniqueCols=pd.DataFrame()
try:
# Spalte mit der groessten Anzahl von Auspraegungen feststellen
lenMax=0
colMax=''
# ueber alle Spalten
for idx,col in enumerate(dfID):
s=pd.Series(dfID[col].unique())
if len(s) > lenMax:
lenMax=len(s)
colMax=col
s=pd.Series(dfID[colMax].unique(),name=colMax)
s.sort_values(inplace=True)
s=pd.Series(s.values,name=colMax)
dfIDUniqueCols=pd.DataFrame(s)
# ueber alle weiteren Spalten
for idx,col in enumerate([col for col in dfID.columns if col != colMax]):
# s unique erzeugen
s=pd.Series(dfID[col].unique(),name=col)
# s sortieren
s.sort_values(inplace=True)
s=pd.Series(s.values,name=col)
dfIDUniqueCols=pd.concat([dfIDUniqueCols,s],axis=1)
dfIDUniqueCols=dfIDUniqueCols[dfID.columns]
except:
logger.error("{0:s}".format(logStr))
finally:
#logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return dfIDUniqueCols
def getIDsFromID(ID='Objects.3S_XYZ_SEG_INFO.3S_L_6_KED_39_EL1.In.AL_S',dfID=None,matchCols=['B','C1','C2','C3','C4','C5','D'],any=False):
"""
returns IDs matching ID
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
#logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
IDsMatching=[]
s=dfID.loc[ID,:]
for ID,row in dfID.iterrows():
match=True
for col in [col for col in row.index.values if col in matchCols]:
#if str(row[col])!=str(s[col]):
if row[col]!=s[col]:
match=False
break
else:
if any:
break
if match:
IDsMatching.append(ID)
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
#except:
# logger.error("{0:s}".format(logStr))
finally:
#logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return sorted(IDsMatching)
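# Usage sketch for getIDsFromID (dfID as produced by getDfFromODI; the ID below is the
# function's default example):
#   IDs = getIDsFromID(ID='Objects.3S_XYZ_SEG_INFO.3S_L_6_KED_39_EL1.In.AL_S', dfID=dfID)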
def getLDSResVecDf(
ID # ResVec-Defining-Channel; i.e. for Segs Objects.3S_XYZ_SEG_INFO.3S_L_6_EL1_39_TUD.In.AL_S / i.e. for Drks Objects.3S_XYZ_DRUCK.3S_6_EL1_39_PTI_02_E.In.AL_S
,dfID
,TCsLDSResDf
,matchCols # i.e. ['B','C1','C2','C3','C4','C5','C6','D'] for Segs; i.e. ['B','C','D'] for Drks
):
"""
returns a df with LDSResChannels as columns (AL_S, ...); derived by Filtering columns from TCsLDSResDf and renaming them
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
dfResVec=pd.DataFrame()
try:
IDs=getIDsFromID(ID=ID,dfID=dfID,matchCols=matchCols)
dfFiltered=TCsLDSResDf.filter(items=IDs)
colDct={}
for col in dfFiltered.columns:
m=re.search(pID,col)
colDct[col]=m.group('E')
dfResVec=dfFiltered.rename(columns=colDct)
except:
logger.error("{0:s}".format(logStr))
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return dfResVec
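# Usage sketch for getLDSResVecDf (assumes dfID from getDfFromODI and TCsLDSRes1 from
# AppLog.getTCs; ID and matchCols follow the examples given in the signature comments above):
#   dfResVec = getLDSResVecDf(
#        ID='Objects.3S_XYZ_SEG_INFO.3S_L_6_EL1_39_TUD.In.AL_S'
#       ,dfID=dfID
#       ,TCsLDSResDf=TCsLDSRes1
#       ,matchCols=['B','C1','C2','C3','C4','C5','C6','D'])
#   dfResVec[['AL_S']].head()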
def fGetFirstAndLastValidIdx(df):
"""
returns (tFirst,tLast)
"""
for idx,col in enumerate(df.columns):
tF=df[col].first_valid_index()
tL=df[col].last_valid_index()
if idx==0:
tFirst=tF
tLast=tL
else:
if tF < tFirst:
tFirst=tF
if tL > tLast:
tLast=tL
return (tFirst,tLast)
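# Usage sketch for fGetFirstAndLastValidIdx (df: any time-indexed TC df):
#   tFirst, tLast = fGetFirstAndLastValidIdx(TCsLDSRes1)
#   TCsLDSRes1.loc[tFirst:tLast]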
def fGetIDSets(
dfID
,divNr #'7'
,pipelineNrLst #['43','44']
,fctIn=None # Funktion von ID die Falsch heraus gibt, wenn ID (doch) nicht in Menge sein soll
):
# returns Dct: key: Bezeichner einer ID-Menge; value: zugeh. IDs
IDSets={}
IDs=[]
for ID in sorted(dfID.index.unique()):
m=re.search(pID,ID)
if m != None:
C1= m.group('C1')
C2= m.group('C2')
C3= m.group('C3')
C4= m.group('C4')
C5= m.group('C5')
if C1 in [divNr] and C3 in pipelineNrLst: # u.a. SEG ErgVecs
IDs.append(ID)
elif C2 in [divNr] and C4 in pipelineNrLst:
IDs.append(ID)
elif C3 in [divNr] and C5 in pipelineNrLst: # FT, PTI, etc.
IDs.append(ID)
if fctIn != None:
IDs=[ID for ID in IDs if fctIn(ID)]
IDSets['IDs']=IDs
IDsAlarm=[ID for ID in IDs if re.search(pID,ID).group('E') == 'AL_S']
IDSets['IDsAlarm']=IDsAlarm
IDsAlarmSEG=[ID for ID in IDsAlarm if re.search(pID,ID).group('C5') != 'PTI']
IDSets['IDsAlarmSEG']=IDsAlarmSEG
IDsAlarmDruck=[ID for ID in IDsAlarm if re.search(pID,ID).group('C5') == 'PTI']
IDSets['IDsAlarmDruck']=IDsAlarmDruck
IDsStat=[ID for ID in IDs if re.search(pID,ID).group('E') == 'STAT_S']
IDSets['IDsStat']=IDsStat
IDsStatSEG=[ID for ID in IDsStat if re.search(pID,ID).group('C5') != 'PTI']
IDSets['IDsStatSEG']=IDsStatSEG
IDsStatDruck=[ID for ID in IDsStat if re.search(pID,ID).group('C5') == 'PTI']
IDSets['IDsStatDruck']=IDsStatDruck
###
IDsSb=[ID for ID in IDs if re.search(pID,ID).group('E') == 'SB_S']
IDSets['IDsSb']=IDsSb
IDsSbSEG=[ID for ID in IDsSb if re.search(pID,ID).group('C5') != 'PTI']
IDSets['IDsSbSEG']=IDsSbSEG
IDsSbDruck=[ID for ID in IDsSb if re.search(pID,ID).group('C5') == 'PTI']
IDSets['IDsSbDruck']=IDsSbDruck
###
IDsZHK=[ID for ID in IDs if re.search(pID,ID).group('E') == 'ZHKNR_S']
IDSets['IDsZHK']=IDsZHK
IDsZHKSEG=[ID for ID in IDsZHK if re.search(pID,ID).group('C5') != 'PTI']
IDSets['IDsZHKSEG']=IDsZHKSEG
IDsZHKDruck=[ID for ID in IDsZHK if re.search(pID,ID).group('C5') == 'PTI']
IDSets['IDsZHKDruck']=IDsZHKDruck
IDsFT=[ID for ID in IDs if re.search(pID,ID).group('C4') == 'FT']
IDSets['IDsFT']=IDsFT
IDsPT=[ID for ID in IDs if re.search(pID,ID).group('C4') == 'PTI']
IDSets['IDsPT']=IDsPT
IDsPT_BCIND=[ID for ID in IDs if re.search(pID,ID).group('C5') == 'PTI' and re.search(pID,ID).group('E') == 'BCIND_S' ]
IDSets['IDsPT_BCIND']=IDsPT_BCIND
### Schieber
IDsZUST=[ID for ID in IDs if re.search(pID,ID).group('E') == 'ZUST']
IDsZUST=sorted(IDsZUST,key=lambda x: re.match(pID,x).group('C5'))
IDSets['IDsZUST']=IDsZUST
IDs_3S_XYZ_ESCHIEBER=[ID for ID in IDs if re.search(pID,ID).group('B') == '3S_FBG_ESCHIEBER']
IDs_3S_XYZ_ESCHIEBER=sorted(IDs_3S_XYZ_ESCHIEBER,key=lambda x: re.match(pID,x).group('C6'))
IDSets['IDs_3S_XYZ_ESCHIEBER']=IDs_3S_XYZ_ESCHIEBER
IDs_XYZ_ESCHIEBER=[ID for ID in IDs if re.search(pID,ID).group('B') == 'FBG_ESCHIEBER']
IDs_XYZ_ESCHIEBER=sorted(IDs_XYZ_ESCHIEBER,key=lambda x: re.match(pID,x).group('C5')) #
IDSets['IDs_XYZ_ESCHIEBER']=IDs_XYZ_ESCHIEBER
IDs_XYZ_ESCHIEBER_Ohne_ZUST=[ID for ID in IDs_XYZ_ESCHIEBER if re.search(pID,ID).group('E') != 'ZUST']
IDs_XYZ_ESCHIEBER_Ohne_ZUST=sorted(IDs_XYZ_ESCHIEBER_Ohne_ZUST,key=lambda x: re.match(pID,x).group('C5'))
IDSets['IDs_XYZ_ESCHIEBER_Ohne_ZUST']=IDs_XYZ_ESCHIEBER_Ohne_ZUST
IDsSchieberAlle=IDsZUST+IDs_XYZ_ESCHIEBER_Ohne_ZUST+IDs_3S_XYZ_ESCHIEBER
IDSets['IDsSchieberAlle']=IDsSchieberAlle
IDsSchieberAlleOhneLAEUFT=[ID for ID in IDsSchieberAlle if re.search('LAEUFT$',ID) == None]
IDsSchieberAlleOhneLAEUFT=[ID for ID in IDsSchieberAlleOhneLAEUFT if re.search('LAEUFT_NICHT$',ID) == None]
IDSets['IDsSchieberAlleOhneLAEUFT']=IDsSchieberAlleOhneLAEUFT
return IDSets
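# Usage sketch for fGetIDSets (the divNr and pipelineNrLst values are hypothetical):
#   IDSets = fGetIDSets(dfID, divNr='6', pipelineNrLst=['39','40'])
#   IDSets['IDsAlarmSEG'], IDSets['IDsAlarmDruck']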
h5KeySep='/'
def fValueFct(x):
return pd.to_numeric(x,errors='ignore',downcast='float')
class AppLog():
"""
SIR 3S App Log (SQC Log)
Maintains a H5-File.
Existing H5-File will be deleted (if not initialized with h5File=...).
H5-Keys are:
* init
* lookUpDf
* lookUpDfZips (if initialized with zip7Files=...)
* Logfilenames praefixed by Log without extension
Attributes:
* h5File
* lookUpDf
zipName
logName
FirstTime (ScenTime - not #LogTime)
            LastTime (ScenTime - not #LogTime)
* lookUpDfZips
"""
    TCsdfOPCFill=False # if True, NaNs in the TCsdfOPC df are filled (ffill/bfill); default: False
@classmethod
def getTCsFromDf(cls,df,dfID=pd.DataFrame(),TCsdfOPCFill=TCsdfOPCFill):
"""
returns several TC-dfs from df
        Processing of dfs as in extractTCsToH5s; see there
        Args:
            * df: a df with Log-Data
                * columns: ['ID','ProcessTime','ScenTime','SubSystem','Value','Direction']
            * dfID
                * index: ID
                * required only if the IDs shall be split into Res1 and Res2
            * TCsdfOPCFill: if True, NaNs in TCsdfOPC are filled (ffill/bfill); default: False
Time curve dfs: cols:
* Time (TCsdfOPC: ProcessTime, other: ScenTime)
* ID
* Value
Time curve dfs:
* TCsdfOPC
* TCsSirCalc
* TCsLDSIn
* TCsLDSRes (dfID empty) or TCsLDSRes1, TCsLDSRes2
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
TCsdfOPC=pd.DataFrame()
TCsdfSirCalc=pd.DataFrame()
TCsdfLDSIn=pd.DataFrame()
if not dfID.empty:
TCsdfLDSRes1=pd.DataFrame()
TCsdfLDSRes2=pd.DataFrame()
else:
TCsdfLDSRes=pd.DataFrame()
if not dfID.empty:
df=df.merge(dfID,how='left',left_on='ID',right_index=True,suffixes=('','_r'))
logger.debug("{0:s}{1:s}".format(logStr,'TCsdfOPC ...'))
TCsdfOPC=df[(df['SubSystem'].str.contains('^OPC'))
                        ### & ~(df['Value'].isnull()) # redundant if df already guarantees this
][['ProcessTime','ID','Value']].pivot_table(index='ProcessTime', columns='ID', values='Value',aggfunc='last')
if TCsdfOPCFill:
for col in TCsdfOPC.columns:
TCsdfOPC[col]=TCsdfOPC[col].fillna(method='ffill')
TCsdfOPC[col]=TCsdfOPC[col].fillna(method='bfill')
logger.debug("{0:s}{1:s}".format(logStr,'TCsdfSirCalc ...'))
TCsdfSirCalc=df[(df['SubSystem'].str.contains('^SirCalc')) | (df['SubSystem'].str.contains('^RTTM')) ][['ScenTime','ID','Value']].pivot_table(index='ScenTime', columns='ID', values='Value',aggfunc='last')
logger.debug("{0:s}{1:s}".format(logStr,'TCsdfLDSIn ...'))
TCsdfLDSIn=df[(df['SubSystem'].str.contains('^LDS')) & (df['Direction'].str.contains('^<-'))][['ScenTime','ID','Value']].pivot_table(index='ScenTime', columns='ID', values='Value',aggfunc='last')
if not dfID.empty:
logger.debug("{0:s}{1:s}".format(logStr,'TCsdfLDSRes1 ...'))
TCsdfLDSRes1=df[(df['SubSystem'].str.contains('^LDS')) & (df['Direction'].str.contains('^->')) & (df['B'].str.contains('^3S_FBG_SEG_INFO'))][['ScenTime','ID','Value']].pivot_table(index='ScenTime', columns='ID', values='Value',aggfunc='last')
logger.debug("{0:s}{1:s}".format(logStr,'TCsdfLDSRes2 ...'))
TCsdfLDSRes2=df[(df['SubSystem'].str.contains('^LDS')) & (df['Direction'].str.contains('^->')) & (df['B'].str.contains('^3S_FBG_DRUCK'))][['ScenTime','ID','Value']].pivot_table(index='ScenTime', columns='ID', values='Value',aggfunc='last')
else:
logger.debug("{0:s}{1:s}".format(logStr,'TCsdfLDSRes ...'))
TCsdfLDSRes=df[(df['SubSystem'].str.contains('^LDS')) & (df['Direction'].str.contains('^->'))][['ScenTime','ID','Value']].pivot_table(index='ScenTime', columns='ID', values='Value',aggfunc='last')
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
if not dfID.empty:
return TCsdfOPC,TCsdfSirCalc,TCsdfLDSIn,TCsdfLDSRes1,TCsdfLDSRes2
else:
return TCsdfOPC,TCsdfSirCalc,TCsdfLDSIn,TCsdfLDSRes
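    # Usage sketch for getTCsFromDf (df as returned by AppLog.get(); with an empty dfID the
    # LDS results are not split into Res1/Res2):
    #   TCsdfOPC, TCsdfSirCalc, TCsdfLDSIn, TCsdfLDSRes = AppLog.getTCsFromDf(df)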
def __init__(self,logFile=None,zip7File=None,h5File=None,h5FileName=None,readWithDictReader=False,nRows=None,readWindowsLog=False):
"""
        (re-)initialize
        logFile:
            is read and stored in H5
            addZip7File(zip7File) reads all logs of a zip file and stores them in H5
        zipFile:
            the 1st logFile is read and stored in H5
            addZip7File(zip7File) reads all logs of a zip file and stores them in H5
            initializing with zipFile is identical to initializing with logFile if logFile is the 1st logFile of the zip
        after addZip7File(zip7File) - possibly repeated for several zips:
            data can be read with self.get(...) (returns 1 df)
            data can be read with self.getTCs(...) (returns several dfs in TC form)
            data can be read with self.getTCsSpecified(...) (returns 1 df in TC form)
            data in TC form can be extracted with self.extractTCsToH5s(...) into separate H5s
            with self.getTCsFromH5s(...) the TCs can be read back
            === addZip7File(zip7File) - possibly repeated - and extractTCsToH5s(...) are part of a 7Zip preprocessing step before the actual analysis ===
        h5File:
            the lookUp dfs are read from the H5-File
            the TC-H5 filenames belonging to the H5-File are assigned
            the TC-H5 files are neither checked for existence nor read
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
self.lookUpDf=pd.DataFrame()
self.lookUpDfZips=pd.DataFrame()
try:
if logFile != None and zip7File != None and h5File != None:
logger.debug("{0:s}{1:s}".format(logStr,'3 Files (logFile and zip7File and h5File) specified.'))
elif logFile != None and zip7File != None:
logger.debug("{0:s}{1:s}".format(logStr,'2 Files (logFile and zip7File) specified.'))
elif logFile != None and h5File != None:
logger.debug("{0:s}{1:s}".format(logStr,'2 Files (logFile and h5File) specified.'))
elif h5File != None and zip7File != None:
logger.debug("{0:s}{1:s}".format(logStr,'2 Files (h5File and zip7File) specified.'))
elif logFile != None:
self.__initlogFile(logFile,h5FileName=h5FileName,readWithDictReader=readWithDictReader,readWindowsLog=readWindowsLog)
elif zip7File != None:
self.__initzip7File(zip7File,h5FileName=h5FileName,readWithDictReader=readWithDictReader,readWindowsLog=readWindowsLog)
elif h5File != None:
self.__initWithH5File(h5File)
else:
logger.debug("{0:s}{1:s}".format(logStr,'No File (logFile XOR zip7File XOR h5File) specified.'))
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
def __initlogFile(self,logFile,h5FileName=None,readWithDictReader=False,readWindowsLog=False):
"""
(re-)initialize with logFile
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
# wenn logFile nicht existiert ...
if not os.path.exists(logFile):
logger.debug("{0:s}logFile {1:s} not existing.".format(logStr,logFile))
else:
df = self.__processALogFile(logFile=logFile,readWithDictReader=readWithDictReader,readWindowsLog=readWindowsLog)
self.__initH5File(logFile,df,h5FileName=h5FileName)
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
def __initH5File(self,h5File,df,h5FileName=None):
"""
creates self.h5File and writes 'init'-Key Logfile df to it
Args:
* h5File: name of logFile or zip7File; the Dir is the Dir of the H5-File
* df
* h5FileName: the H5-FileName without Dir and Extension; if None (default), "Log ab ..." is used
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
(h5FileHead,h5FileTail)=os.path.split(h5File)
# H5-File
if h5FileName==None:
h5FileTail="Log ab {0:s}.h5".format(str(df['#LogTime'].min())).replace(':',' ').replace('-',' ')
else:
h5FileTail=h5FileName+'.h5'
self.h5File=os.path.join(h5FileHead,h5FileTail)
# wenn H5 existiert wird es geloescht
if os.path.exists(self.h5File):
os.remove(self.h5File)
logger.debug("{0:s}Existing H5-File {1:s} deleted.".format(logStr,h5FileTail))
# init-Logfile schreiben
self.__toH5('init',df)
logger.debug("{0:s}'init'-Key Logfile done.".format(logStr))
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
def __initWithH5File(self,h5File,useRawHdfAPI=False):
"""
self.h5File=h5File
self.lookUpDf
self.lookUpDfZips
        the lookUp dfs are read from the H5-File
        the TC-H5 filenames belonging to the H5-File are assigned if these H5-Files exist
        the TC-H5 files are not read
        the CVD filename belonging to the H5-File is assigned if that H5-File exists
        this CVD H5-File is not read
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
# H5 existiert
if os.path.exists(h5File):
self.h5File=h5File
# Keys available
with pd.HDFStore(self.h5File) as h5Store:
h5Keys=sorted(h5Store.keys())
logger.debug("{0:s}h5Keys available: {1:s}".format(logStr,str(h5Keys)))
h5KeysStripped=[item.replace(h5KeySep,'') for item in h5Keys]
if useRawHdfAPI:
with pd.HDFStore(self.h5File) as h5Store:
if 'lookUpDf' in h5KeysStripped:
self.lookUpDf=h5Store['lookUpDf']
if 'lookUpDfZips' in h5KeysStripped:
self.lookUpDfZips=h5Store['lookUpDfZips']
else:
if 'lookUpDf' in h5KeysStripped:
self.lookUpDf=pd.read_hdf(self.h5File, key='lookUpDf')
if 'lookUpDfZips' in h5KeysStripped:
self.lookUpDfZips=pd.read_hdf(self.h5File, key='lookUpDfZips')
else:
logStrFinal="{0:s}h5File {1:s} not existing.".format(logStr,h5File)
logger.debug(logStrFinal)
raise LxError(logStrFinal)
#TC-H5s
(name,ext)=os.path.splitext(self.h5File)
TCPost='_TC'
h5FileOPC=name+TCPost+'OPC'+ext
h5FileSirCalc=name+TCPost+'SirCalc'+ext
h5FileLDSIn=name+TCPost+'LDSIn'+ext
h5FileLDSRes1=name+TCPost+'LDSRes1'+ext
h5FileLDSRes2=name+TCPost+'LDSRes2'+ext
h5FileLDSRes=name+TCPost+'LDSRes'+ext
if os.path.exists(h5FileOPC):
self.h5FileOPC=h5FileOPC
logger.debug("{0:s}Existing H5-File {1:s}.".format(logStr,self.h5FileOPC))
if os.path.exists(h5FileSirCalc):
self.h5FileSirCalc=h5FileSirCalc
logger.debug("{0:s}Existing H5-File {1:s}.".format(logStr,self.h5FileSirCalc))
if os.path.exists(h5FileLDSIn):
self.h5FileLDSIn=h5FileLDSIn
logger.debug("{0:s}Existing H5-File {1:s}.".format(logStr,self.h5FileLDSIn))
if os.path.exists(h5FileLDSRes):
self.h5FileLDSRes=h5FileLDSRes
logger.debug("{0:s}Existing H5-File {1:s}.".format(logStr,self.h5FileLDSRes))
if os.path.exists(h5FileLDSRes1):
self.h5FileLDSRes1=h5FileLDSRes1
logger.debug("{0:s}Existing H5-File {1:s}.".format(logStr,self.h5FileLDSRes1))
if os.path.exists(h5FileLDSRes2):
self.h5FileLDSRes2=h5FileLDSRes2
logger.debug("{0:s}Existing H5-File {1:s}.".format(logStr,self.h5FileLDSRes2))
h5FileCVD=name+'_'+'CVD'+ext
if os.path.exists(h5FileCVD):
self.h5FileCVD=h5FileCVD
logger.debug("{0:s}Existing H5-File {1:s}.".format(logStr,self.h5FileCVD))
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
def getInitDf(self,useRawHdfAPI=False):
"""
returns InitDf from H5-File
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
df=pd.DataFrame()
# H5 existiert
if os.path.exists(self.h5File):
# Keys available
with pd.HDFStore(self.h5File) as h5Store:
h5Keys=sorted(h5Store.keys())
logger.debug("{0:s}h5Keys available: {1:s}".format(logStr,str(h5Keys)))
h5KeysStripped=[item.replace(h5KeySep,'') for item in h5Keys]
if useRawHdfAPI:
with pd.HDFStore(self.h5File) as h5Store:
if 'init' in h5KeysStripped:
df=h5Store['init']
else:
if 'init' in h5KeysStripped:
df=pd.read_hdf(self.h5File, key='init')
else:
                logStrFinal="{0:s}h5File {1:s} not existing.".format(logStr,self.h5File)
logger.debug(logStrFinal)
raise LxError(logStrFinal)
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return df
def __initzip7File(self,zip7File,h5FileName=None,nRows=None,readWithDictReader=False,readWindowsLog=False):
"""
(re-)initialize with zip7File
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
# wenn zip7File nicht existiert ...
if not os.path.exists(zip7File):
logStrFinal="{0:s}zip7File {1:s} not existing.".format(logStr,zip7File)
logger.debug(logStrFinal)
raise LxError(logStrFinal)
else:
(zip7FileHead, zip7FileTail)=os.path.split(zip7File)
zipFileDirname=os.path.dirname(zip7File)
logger.debug("{0:s}zipFileDirname: {1:s}".format(logStr,zipFileDirname))
aDfRead=False
with py7zr.SevenZipFile(zip7File, 'r') as zip7FileObj:
allLogFiles = zip7FileObj.getnames()
logger.debug("{0:s}{1:s}: len(getnames()): {2:d}.".format(logStr,zip7FileTail,len(allLogFiles)))
logger.debug("{0:s}getnames(): {1:s}.".format(logStr,str(allLogFiles)))
extDirLstTBDeleted=[]
extDirLstExistingLogged=[]
for idx,logFileNameInZip in enumerate(allLogFiles):
logger.debug("{0:s}idx: {1:d} logFileNameInZip: {2:s}".format(logStr,idx,logFileNameInZip))
# die Datei die 7Zip bei extract erzeugen wird
logFile=os.path.join(zipFileDirname,logFileNameInZip)
(logFileHead, logFileTail)=os.path.split(logFile) # logFileHead == dirname()
logger.debug("{0:s}idx: {1:d} logFileHead: {2:s} logFileTail: {3:s}".format(logStr,idx,logFileHead,logFileTail))
(name, ext)=os.path.splitext(logFile)
logger.debug("{0:s}idx: {1:d} name: {2:s} ext: {3:s}".format(logStr,idx,name,ext))
if logFileHead!='': # logFileHead == dirname()
if os.path.exists(logFileHead) and logFileHead not in extDirLstExistingLogged:
logger.debug("{0:s}idx: {1:d} Verz. logFileHead: {2:s} existiert bereits.".format(logStr,idx,logFileHead))
extDirLstExistingLogged.append(logFileHead)
elif not os.path.exists(logFileHead):
logger.debug("{0:s}idx: {1:d} Verz. logFileHead: {2:s} existiert noch nicht.".format(logStr,idx,logFileHead))
extDirLstTBDeleted.append(logFileHead)
# kein Logfile zu prozessieren ...
if ext == '':
continue
# Logfile prozessieren ...
if os.path.exists(logFile):
isFile = os.path.isfile(logFile)
if isFile:
logger.debug("{0:s}idx: {1:d} Log: {2:s} existiert bereits. Wird durch Extrakt ueberschrieben werden.".format(logStr,idx,logFileTail))
logFileTBDeleted=False
else:
logFileTBDeleted=False
else:
logger.debug("{0:s}idx: {1:d} Log: {2:s} existiert nicht. Wird extrahiert, dann prozessiert und dann wieder geloescht.".format(logStr,idx,logFileTail))
logFileTBDeleted=True
# extrahieren
zip7FileObj.extract(path=zipFileDirname,targets=logFileNameInZip)
if os.path.exists(logFile):
pass
else:
logger.warning("{0:s}idx: {1:d} Log: {2:s} NOT extracted?! Continue with next Name in 7Zip.".format(logStr,idx,logFileTail))
# nichts zu prozessieren ...
continue
# ...
if os.path.isfile(logFile):
df = self.__processALogFile(logFile=logFile,nRows=nRows,readWithDictReader=readWithDictReader,readWindowsLog=readWindowsLog)
if df is None:
logger.warning("{0:s}idx: {1:d} Log: {2:s} NOT processed?! Continue with next Name in 7Zip.".format(logStr,idx,logFileTail))
# nichts zu prozessieren ...
continue
else:
aDfRead=True
# ...
# gleich wieder loeschen
if os.path.exists(logFile) and logFileTBDeleted:
if os.path.isfile(logFile):
os.remove(logFile)
logger.debug("{0:s}idx: {1:d} Log: {2:s} wieder geloescht.".format(logStr,idx,logFileTail))
# wir wollen nur das 1. File lesen ...
if aDfRead:
break;
for dirName in extDirLstTBDeleted:
if os.path.exists(dirName):
if os.path.isdir(dirName):
(dirNameHead, dirNameTail)=os.path.split(dirName)
if len(os.listdir(dirName)) == 0:
os.rmdir(dirName)
logger.debug("{0:s}dirName: {1:s} existierte nicht und wurde wieder geloescht.".format(logStr,dirNameTail))
else:
logger.info("{0:s}dirName: {1:s} existiert mit nicht leerem Inhalt?!".format(logStr,dirNameTail))
self.__initH5File(zip7File,df,h5FileName=h5FileName)
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
def __toH5(self,key,df,useRawHdfAPI=False,updLookUpDf=False,logName='',zipName='',noDfStorage=False):
"""
write df with key to H5-File (if not noDfStorage)
Args:
* updLookUpDf: if True, self.lookUpDf is updated with
* zipName (the Zip of logFile)
* logName (the name of the logFile i.e. 20201113_0000004.log)
* FirstTime (the first ScenTime in df)
* LastTime (the last ScenTime in df)
        self.lookUpDf is not written to H5
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
(h5FileHead,h5FileTail)=os.path.split(self.h5File)
if not noDfStorage:
if useRawHdfAPI:
with pd.HDFStore(self.h5File) as h5Store:
try:
h5Store.put(key,df)
except Exception as e:
logger.error("{0:s}Writing df with h5Key={1:s} to {2:s} FAILED!".format(logStr,key,h5FileTail))
raise e
else:
df.to_hdf(self.h5File, key=key)
logger.debug("{0:s}Writing df with h5Key={1:s} to {2:s} done.".format(logStr,key,h5FileTail))
if updLookUpDf:
s=df['ScenTime']#['#LogTime']
FirstTime=s.iloc[0]
LastTime=s.iloc[-1]
if self.lookUpDf.empty:
data={ 'zipName': [zipName]
,'logName': [logName]
,'FirstTime' : [FirstTime]
,'LastTime' : [LastTime]
}
self.lookUpDf = pd.DataFrame (data, columns = ['zipName','logName','FirstTime','LastTime'])
self.lookUpDf['zipName']=self.lookUpDf['zipName'].astype(str)
self.lookUpDf['logName']=self.lookUpDf['logName'].astype(str)
else:
data={ 'zipName': zipName
,'logName': logName
,'FirstTime' : FirstTime
,'LastTime' : LastTime
}
self.lookUpDf=self.lookUpDf.append(data,ignore_index=True)
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
def __processALogFile(self,logFile=None,delimiter='\t',nRows=None,readWithDictReader=False,fValueFct=fValueFct,readWindowsLog=False):
"""
process logFile
Args:
* logFile: logFile to be processed
* nRows: number of logFile rows to be processed; default: None (:= all rows are processed); if readWithDictReader: last row is also processed
            * readWithDictReader: if True, csv.DictReader is used; default: False (:= pd.read_csv is used)
Returns:
* df: logFile processed to df
* converted:
* #LogTime: to datetime
* ProcessTime: to datetime
* Value: to float64
* ID,Direction,SubSystem,LogLevel,State,Remark: to str
* new:
* ScenTime datetime
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
df=None
try:
with open(logFile,'r') as f:
pass
(logFileHead,logFileTail)=os.path.split(logFile)
if readWithDictReader:
restkey='+'
with open(logFile,"r") as csvFile: # 1. Zeile enthaelt die Ueberschrift
reader = csv.DictReader(csvFile,delimiter=delimiter,restkey=restkey)
logger.debug("{0:s}{1:s} csv.DictReader reader processed.".format(logStr,logFileTail))
# If a row has more fields than fieldnames, the remaining data is put in a list and stored with the fieldname specified by restkey.
colNames=reader.fieldnames
dcts = [dct for dct in reader] # alle Zeilen lesen
logger.debug("{0:s}{1:s} csv.DictReader-Ergebnis processed.".format(logStr,logFileTail))
if nRows!=None:
dcts=dcts[0:nRows]+[dcts[-1]]
# nur die Spaltennamen werden als row-Spalten erzeugt
rows = [[dct[colName] for colName in colNames] for dct in dcts]
logger.debug("{0:s}{1:s} rows processed.".format(logStr,logFileTail))
# die "ueberfluessigen" Spalten an die letzte Spalte dranhaengen
for i, dct in enumerate(dcts):
if restkey in dct:
restValue=dct[restkey]
restValueStr = delimiter.join(restValue)
newValue=rows[i][-1]+delimiter+restValueStr
#logger.debug("{0:s}{1:s} restValueStr: {2:s} - Zeile {3:10d}: {4:s} - neuer Wert letzte Spalte: {5:s}.".format(logStr,logFileTail,restValueStr,i,str(rows[i]),newValue))
rows[i][-1]=rows[i][-1]+newValue
logger.debug("{0:s}{1:s} restkey processed.".format(logStr,logFileTail))
index=range(len(rows))
df = pd.DataFrame(rows,columns=colNames,index=index)
else:
if nRows==None:
df=pd.read_csv(logFile,delimiter=delimiter,error_bad_lines=False,warn_bad_lines=True,low_memory=False)
else:
df=pd.read_csv(logFile,delimiter=delimiter,error_bad_lines=False,warn_bad_lines=True,low_memory=False,nrows=nRows)
logger.debug("{0:s}{1:s} pd.DataFrame processed.".format(logStr,logFileTail))
#logger.debug("{0:s}df: {1:s}".format(logStr,str(df)))
#LogTime
df['#LogTime']=pd.to_datetime(df['#LogTime'],unit='ms',errors='coerce') # NaT
#ProcessTime
df['ProcessTime']=pd.to_datetime(df['ProcessTime'],unit='ms',errors='coerce') # NaT
logger.debug("{0:s}{1:s} col ProcessTime processed.".format(logStr,logFileTail))
#Value
df['Value']=df.Value.str.replace(',', '.') # Exception: Line: 1137: <class 'AttributeError'>: Can only use .str accessor with string values!
df['Value']=fValueFct(df['Value'].values) # df['ValueProcessed'].apply(fValueFct)
logger.debug("{0:s}{1:s} col Value processed.".format(logStr,logFileTail))
#Strings
for col in ['ID','Direction','SubSystem','LogLevel','State','Remark']:
df[col]=df[col].astype(str)
logger.debug("{0:s}{1:s} String-cols processed.".format(logStr,logFileTail))
            #1618249551621 STD CVD 1615442324000 p-p BEGIN_OF_NEW_CONTROL_VOLUME 6-10-SV1-RB~6-10-BID-RB NULL NULL # is this string identical in both cases (Linux and Windows)?
            #1618249551621 STD CVD <- 156 CV_ID
            ##ScenTime
            ## SubSystem Direction ProcessTime ID Value State Remark
            ## Linux ---
            ## 1615029280000 INF SQC Starting cycle for 2021-03-06 12:14:38.000
            ## 1615029280000 STD LDS MCL 1615029278000 Main cycle loop 06.03.2021 12:14:38.000 (ScenTime: date and time in plain text; column ProcessTime holds the ScenTime!)
            ## Windows ---
            ## 1618256150711 STD SQC 1615457121000 Main cycle loop 11:05:21.000 (ScenTime time in plain text; column ProcessTime holds the ScenTime!)
dfScenTime=df[df['ID']=='Main cycle loop'][['ProcessTime']]
dfScenTime.rename(columns={'ProcessTime':'ScenTime'},inplace=True)
df=df.join(dfScenTime)
df['ScenTime']=df['ScenTime'].fillna(method='ffill')
df['ScenTime']=df['ScenTime'].fillna(method='bfill')
if df['ScenTime'].isnull().values.all():
logger.debug("{0:s}Keine Zeile mit ID=='Main cycle loop' gefunden. ScenTime zu #LogTime gesetzt.".format(logStr))
df['ScenTime']=df['#LogTime'] # wenn keine Zeile mit ID=='Main cycle loop' gefunden wurde, wird ScenTime zu #LogTime gesetzt
# finalisieren
df=df[['#LogTime','LogLevel','SubSystem','Direction','ProcessTime','ID','Value','ScenTime','State','Remark']]
logger.debug("{0:s}{1:s} processed with nRows: {2:s} (None if all).".format(logStr,logFileTail,str(nRows)))
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return df
def rebuildLookUpDfZips(self,zip7Files,readWithDictReader=True,readWindowsLog=False):
"""
(re-)initialize with zip7Files
only persistent outcome is lookUpDfZips (Attribute and H5-Persistence)
lookUpdf is changed but not H5-stored
(Re-)Init with AppLog(h5File=...) after using rebuildLookUpDfZips to obtain old lookUpdf
main Usage of rebuildLookUpDfZips is to determine which zip7Files to add by i.e.:
zip7FilesToAdd=lx.lookUpDfZips[~(lx.lookUpDfZips['LastTime']<timeStartAusschnitt) & ~(lx.lookUpDfZips['FirstTime']>timeEndAusschnitt)].index.to_list()
"""
#noDfStorage=False
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
#self.__initzip7File(zip7File=zip7Files[0],h5FileName=h5FileName,nRows=1,readWithDictReader=True)
for zip7File in zip7Files:
logger.info("{0:s}addZip7File: {1:s}".format(logStr,zip7File))
self.addZip7File(zip7File,firstsAndLastsLogsOnly=True,nRows=1,readWithDictReader=readWithDictReader,noDfStorage=True,readWindowsLog=readWindowsLog)
logger.debug("{0:s}lookUpDf: {1:s}".format(logStr,self.lookUpDf.to_string()))
df=self.lookUpDf.groupby(by='zipName').agg(['min', 'max'])
logger.debug("{0:s}df: {1:s}".format(logStr,df.to_string()))
minTime=df.loc[:,('FirstTime','min')]
maxTime=df.loc[:,('LastTime','max')]
minFileNr=df.loc[:,('logName','min')].apply(lambda x: int(re.search(logFilenamePattern,x).group(3)))
maxFileNr=df.loc[:,('logName','max')].apply(lambda x: int(re.search(logFilenamePattern,x).group(3)))
s=(maxTime-minTime)/(maxFileNr-minFileNr)
lookUpDfZips=s.to_frame().rename(columns={0:'TimespanPerLog'})
lookUpDfZips['NumOfFiles']=maxFileNr-minFileNr
lookUpDfZips['FirstTime']=minTime
lookUpDfZips['LastTime']=maxTime
lookUpDfZips['minFileNr']=minFileNr
lookUpDfZips['maxFileNr']=maxFileNr
lookUpDfZips=lookUpDfZips[['FirstTime','LastTime','TimespanPerLog','NumOfFiles','minFileNr','maxFileNr']]
# lookUpDfZips schreiben
self.lookUpDfZips=lookUpDfZips
self.__toH5('lookUpDfZips',self.lookUpDfZips)
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
def addZip7File(self,zip7File,firstsAndLastsLogsOnly=False,nRows=None,readWithDictReader=False,noDfStorage=False,readWindowsLog=False):
"""
add zip7File
Args:
            * zip7File: zip file whose LogFiles shall be added
            * Args for internal Usage:
                * firstsAndLastsLogsOnly (then True)
                * nRows (then 1)
                * readWithDictReader (then True)
                i.e. only the first and last logs per zip are read, and of those only the 1st and the last row, using DictReader
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
# wenn zip7File nicht existiert ...
if not os.path.exists(zip7File):
logStrFinal="{0:s}zip7File {1:s} not existing.".format(logStr,zip7File)
logger.debug(logStrFinal)
raise LxError(logStrFinal)
else:
(zip7FileHead, zip7FileTail)=os.path.split(zip7File)
logger.debug("{0:s}zip7FileHead (leer wenn zip7 im selben Verz.): {1:s} zip7FileTail: {2:s}.".format(logStr,zip7FileHead,zip7FileTail))
logger.info("{0:s}zip7File: {1:s} ...".format(logStr,zip7File))
tmpDir=os.path.dirname(zip7File)
tmpDirContent=glob.glob(tmpDir)
with py7zr.SevenZipFile(zip7File, 'r') as zip7FileObj:
allLogFiles = zip7FileObj.getnames()
allLogFilesLen=len(allLogFiles)
logger.debug("{0:s}{1:s}: len(getnames()): {2:d}.".format(logStr,zip7FileTail,allLogFilesLen))
extDirLstTBDeleted=[]
extDirLstExistingLogged=[]
for idx,logFileNameInZip in enumerate(allLogFiles):
if firstsAndLastsLogsOnly:
if idx not in [0,1,allLogFilesLen-2,allLogFilesLen-1]:
#logger.debug("{0:s}idx: {1:d} item: {2:s} NOT processed ...".format(logStr,idx,logFileNameInZip))
continue
logger.info("{0:s}idx: {1:d} item: {2:s} ...".format(logStr,idx,logFileNameInZip))
# die Datei die 7Zip bei extract erzeugen wird
logFile=os.path.join(tmpDir,logFileNameInZip)
(logFileHead, logFileTail)=os.path.split(logFile)
# evtl. bezeichnet logFileNameInZip keine Datei sondern ein Verzeichnis
(name, ext)=os.path.splitext(logFileNameInZip)
if ext == '':
# Verzeichnis!
extDir=os.path.join(tmpDir,logFileNameInZip)
(extDirHead, extDirTail)=os.path.split(extDir)
if os.path.exists(extDir) and extDir in tmpDirContent:
logger.debug("{0:s}idx: {1:d} extDir: {2:s} existiert(e) bereits.".format(logStr,idx,extDirTail))
extDirLstExistingLogged.append(extDir)
elif os.path.exists(extDir) and extDir not in tmpDirContent:
logger.debug("{0:s}idx: {1:d} extDir: {2:s} existiert(e) noch nicht.".format(logStr,idx,extDirTail))
extDirLstTBDeleted.append(extDir)
elif not os.path.exists(extDir) and extDir not in tmpDirContent:
logger.debug("{0:s}idx: {1:d} extDir: {2:s} existiert(e) noch nicht.".format(logStr,idx,extDirTail))
extDirLstTBDeleted.append(extDir)
# kein Logfile zu prozessieren ...
continue
# logFileNameInZip bezeichnet eine Datei
if os.path.exists(logFile):
isFile = os.path.isfile(logFile)
if isFile:
logger.debug("{0:s}idx: {1:d} Log: {2:s} existiert bereits. Wird durch Extrakt ueberschrieben werden.".format(logStr,idx,logFileTail))
logFileTBDeleted=False
else:
logFileTBDeleted=False
else:
logger.debug("{0:s}idx: {1:d} Log: {2:s} existiert nicht. Wird extrahiert, dann prozessiert und dann wieder geloescht.".format(logStr,idx,logFileTail))
logFileTBDeleted=True
# extrahieren
logger.debug("{0:s}Log: {1:s} wird extrahiert ... ".format(logStr,logFileTail))
try:
zip7FileObj.extract(path=tmpDir,targets=logFileNameInZip)
except lzma.LZMAError:
logger.warning("{0:s}Log: {1:s} nicht erfolgreich extrahiert - continue ... ".format(logStr,logFileTail))
continue
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
logger.debug("{0:s}Log: {1:s} wurde extrahiert. ".format(logStr,logFileTail))
if os.path.exists(logFile):
pass
else:
logger.warning("{0:s}idx: {1:d} Log: {2:s} NOT extracted?! Continue with next Name in 7Zip.".format(logStr,idx,logFileTail))
# nichts zu prozessieren ...
continue
# ...
if os.path.isfile(logFile):
df = self.__processALogFile(logFile=logFile,nRows=nRows,readWithDictReader=readWithDictReader,readWindowsLog=readWindowsLog)
if df is None:
logger.warning("{0:s}idx: {1:d} Log: {2:s} NOT processed?! Continue with next Name in 7Zip.".format(logStr,idx,logFileTail))
# nichts zu prozessieren ...
continue
# ...
# gleich wieder loeschen
if os.path.exists(logFile) and logFileTBDeleted:
if os.path.isfile(logFile):
os.remove(logFile)
logger.debug("{0:s}idx: {1:d} Log: {2:s} wieder geloescht.".format(logStr,idx,logFileTail))
# ...
(name, ext)=os.path.splitext(logFileTail)
key='Log'+name
if zip7FileHead != '':
zipName=os.path.join(os.path.relpath(zip7FileHead),zip7FileTail)
else:
zipName=zip7FileTail
# df schreiben
self.__toH5(key,df,updLookUpDf=True,logName=logFileTail,zipName=zipName,noDfStorage=noDfStorage)#os.path.join(os.path.relpath(zip7FileHead),zip7FileTail))
# danach gleich lookUpDf schreiben ...
self.__toH5('lookUpDf',self.lookUpDf,noDfStorage=noDfStorage)
for dirName in extDirLstTBDeleted:
if os.path.exists(dirName):
if os.path.isdir(dirName):
(dirNameHead, dirNameTail)=os.path.split(dirName)
if len(os.listdir(dirName)) == 0:
os.rmdir(dirName)
logger.debug("{0:s}dirName: {1:s} existierte nicht und wurde wieder geloescht.".format(logStr,dirNameTail))
else:
logger.info("{0:s}dirName: {1:s} existiert mit nicht leerem Inhalt?!".format(logStr,dirNameTail))
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
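    # Usage sketch for addZip7File (zip file names are hypothetical):
    #   lx = AppLog(zip7File=r'Logs1.7z')
    #   lx.addZip7File(r'Logs2.7z')
    #   lx.lookUpDf[['zipName','logName','FirstTime','LastTime']]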
def getTotalLogTime(self):
"""
        Returns Tuple: firstTime,lastTime,tdTotalGross,tdTotal,tdBetweenFilesTotal # gross log time, net log time, sum of all gaps between two consecutive log files (should equal gross minus net)
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
# Inhalt der Logs
tdTotal=pd.Timedelta('0 Seconds')
tdBetweenFilesTotal=pd.Timedelta('0 Seconds')
for idx,(index,row) in enumerate(self.lookUpDf.iterrows()):
if idx > 0:
tdBetweenFiles=row["FirstTime"]-lastTime
tdBetweenFilesTotal=tdBetweenFilesTotal+tdBetweenFiles
if tdBetweenFiles > pd.Timedelta('0 second'):
if tdBetweenFiles > pd.Timedelta('1 second'):
logger.info("{:s}Zeitdifferenz: {!s:s} zwischen {:s} ({:s}) und {:s} ({:s})".format(logStr,
str(tdBetweenFiles).replace('days','Tage')
,lastFile,lastZip
,row["logName"],row["zipName"]
))
pass
if tdBetweenFiles < pd.Timedelta('0 second'):
if tdBetweenFiles < -pd.Timedelta('1 second'):
pass
logger.info("{:s}Zeitueberlappung > 1s: {!s:s} zwischen {:s} ({:s}) und {:s} ({:s})".format(logStr,
str(tdBetweenFiles).replace('days','Tage')
,lastFile,lastZip
,row["logName"],row["zipName"]
))
td=row["LastTime"]-row["FirstTime"]
if type(td) == pd.Timedelta:
tdTotal=tdTotal+td
else:
print(index)# Fehler!
lastTime=row["LastTime"]
lastFile=row["logName"]
lastZip=row["zipName"]
firstTime=self.lookUpDf.iloc[0]["FirstTime"]
lastTime=self.lookUpDf.iloc[-1]["LastTime"]
tdTotalGross=lastTime-firstTime
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return firstTime,lastTime,tdTotalGross,tdTotal,tdBetweenFilesTotal
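    # Usage sketch for getTotalLogTime (lx: an initialized AppLog instance):
    #   firstTime, lastTime, tdTotalGross, tdTotal, tdBetweenFilesTotal = lx.getTotalLogTime()
    #   # tdTotalGross - tdTotal should roughly equal tdBetweenFilesTotal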
def extractTCsToH5s(self,dfID=pd.DataFrame(),timeStart=None,timeEnd=None,TCsdfOPCFill=TCsdfOPCFill):
"""
        extracts TC data (and CVD data) from H5 into separate H5-Files (postfixes: _TCxxx.h5 and _CVD.h5)
        TCsdfOPCFill: if True, NaNs in TCsdfOPC are filled (ffill/bfill); default: False
        if timeStart != None: data is appended to existing .h5s; otherwise they are overwritten
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
            # create the _TCxxx.h5 files (OPC, SirCalc, LDSIn, LDSRes1, LDSRes2 (,LDSRes)) and _CVD.h5
            # over all dfs in H5 (taking timeStart and timeEnd into account)
                # read
                # determine the TC subset: 'ID','ProcessTime','ScenTime','SubSystem','Value','Direction'
                    # rows with 'Value' isnull() are NOT read
                    # i.e. with a logfile semantics that resets a value to (whatever) via NULL rows, such a value would stay at its last non-NULL value in a step-plot output ...
                    # ... for the time being ...
                # build the subsets: ['TCsdfOPC','TCsdfSirCalc','TCsdfLDSIn','TCsdfLDSRes1','TCsdfLDSRes2' (,'TCsdfLDSRes')]
                    # ... NULLs (NaNs) arise from pivoting with index = Time: not every time (superset) has a value for every ID
                # save
(name,ext)=os.path.splitext(self.h5File)
TCPost='_TC'
self.h5FileOPC=name+TCPost+'OPC'+ext
self.h5FileSirCalc=name+TCPost+'SirCalc'+ext
self.h5FileLDSIn=name+TCPost+'LDSIn'+ext
if not dfID.empty:
# Attribute
self.h5FileLDSRes1=name+TCPost+'LDSRes1'+ext
self.h5FileLDSRes2=name+TCPost+'LDSRes2'+ext
# Komplement wird geloescht
h5FileLDSRes=name+TCPost+'LDSRes'+ext
try:
# wenn TC-H5 existiert wird es geloescht
if os.path.exists(h5FileLDSRes):
os.remove(h5FileLDSRes)
logger.debug("{0:s}Existing H5-File {1:s} deleted.".format(logStr,h5FileLDSRes))
del self.h5FileLDSRes
except:
pass
else:
# Attribut
self.h5FileLDSRes=name+TCPost+'LDSRes'+ext
# Komplemente werden geloescht
h5FileLDSRes1=name+TCPost+'LDSRes1'+ext
h5FileLDSRes2=name+TCPost+'LDSRes2'+ext
try:
# wenn TC-H5 existiert wird es geloescht
if os.path.exists(h5FileLDSRes1):
os.remove(h5FileLDSRes1)
logger.debug("{0:s}Existing H5-File {1:s} deleted.".format(logStr,h5FileLDSRes1))
# wenn TC-H5 existiert wird es geloescht
if os.path.exists(h5FileLDSRes2):
os.remove(h5FileLDSRes2)
logger.debug("{0:s}Existing H5-File {1:s} deleted.".format(logStr,h5FileLDSRes2))
del self.h5FileLDSRes1
del self.h5FileLDSRes2
except:
pass
self.h5FileCVD=name+'_'+'CVD'+ext
h5Keys,h5KeysPost=self.__getH5Keys(timeStart=timeStart,timeEnd=timeEnd)
h5KeysOPC=['TCsOPC'+x for x in h5KeysPost]
h5KeysSirCalc=['TCsSirCalc'+x for x in h5KeysPost]
h5KeysLDSIn=['TCsLDSIn'+x for x in h5KeysPost]
h5KeysLDSRes1=['TCsLDSRes1'+x for x in h5KeysPost]
h5KeysLDSRes2=['TCsLDSRes2'+x for x in h5KeysPost]
h5KeysLDSRes=['TCsLDSRes'+x for x in h5KeysPost]
h5KeysCVD=['CVDRes'+x for x in h5KeysPost]
h5KeysAll=zip(h5Keys,h5KeysOPC,h5KeysSirCalc,h5KeysLDSIn,h5KeysLDSRes1,h5KeysLDSRes2,h5KeysLDSRes,h5KeysCVD)
for idx,(h5Key,h5KeyOPC,h5KeySirCalc,h5KeyLDSIn,h5KeyLDSRes1,h5KeyLDSRes2,h5KeyLDSRes,h5KeyCVD) in enumerate(h5KeysAll):
#H5-Write-Modus
if idx==0:
if timeStart!=None:
mode='a'
else:
mode='w'
else:
mode='a'
logger.info("{0:s}Get (read_hdf) df with h5Key: {1:s} ...".format(logStr,h5Key))
df=pd.read_hdf(self.h5File, key=h5Key)
# CVD -------------------------------------------------------------------------------------------------
dfCVD=df[df['SubSystem']=='CVD']
df=df[['ID','ProcessTime','ScenTime','SubSystem','Value','Direction']]
df['Value']=df['Value'].apply(lambda x: fTCCast(x))
df=df[~(df['Value'].isnull())]
if not dfID.empty:
TCsdfOPC,TCsdfSirCalc,TCsdfLDSIn,TCsdfLDSRes1,TCsdfLDSRes2=self.getTCsFromDf(df,dfID=dfID,TCsdfOPCFill=TCsdfOPCFill)
else:
TCsdfOPC,TCsdfSirCalc,TCsdfLDSIn,TCsdfLDSRes=self.getTCsFromDf(df,dfID=dfID,TCsdfOPCFill=TCsdfOPCFill)
logger.debug("{0:s}{1:s}".format(logStr,'Write ...'))
TCsdfOPC.to_hdf(self.h5FileOPC,h5KeyOPC, mode=mode)
TCsdfSirCalc.to_hdf(self.h5FileSirCalc,h5KeySirCalc, mode=mode)
TCsdfLDSIn.to_hdf(self.h5FileLDSIn,h5KeyLDSIn, mode=mode)
if not dfID.empty:
TCsdfLDSRes1.to_hdf(self.h5FileLDSRes1,h5KeyLDSRes1, mode=mode)
TCsdfLDSRes2.to_hdf(self.h5FileLDSRes2,h5KeyLDSRes2, mode=mode)
else:
TCsdfLDSRes.to_hdf(self.h5FileLDSRes,h5KeyLDSRes, mode=mode)
# ---
dfCVD.to_hdf(self.h5FileCVD,h5KeyCVD, mode=mode)
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return
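    # Usage sketch for extractTCsToH5s (the zip file name is hypothetical; dfID from getDfFromODI):
    #   lx = AppLog(zip7File=r'Logs1.7z')
    #   lx.extractTCsToH5s(dfID=dfID)
    #   lx.shrinkH5File()   # optional: afterwards only the TC/CVD H5s carry the log data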
def shrinkH5File(self):
"""
        the dfs are deleted from the H5-File
        extractTCsToH5s ### MUST ### have been run beforehand
        after shrinkH5File the actual log data is no longer available in the master H5
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
# H5 existiert
if os.path.exists(self.h5File):
# Keys available
with pd.HDFStore(self.h5File) as h5Store:
h5Keys=sorted(h5Store.keys()) # /Log20201216_0000001
logger.debug("{0:s}h5Keys available: {1:s}".format(logStr,str(h5Keys)))
for key in h5Keys:
if re.match('(^/Log)',key):
logger.debug("{0:s}key removed: {1:s}".format(logStr,str(key)))
h5Store.remove(key.replace(h5KeySep,''))
else:
logger.debug("{0:s}key NOT removed: {1:s}".format(logStr,str(key)))
with pd.HDFStore(self.h5File) as h5Store:
pass
shrinkCmd="ptrepack --chunkshape=auto --propindexes --complib=blosc "+self.h5File+" "+self.h5File+".Shrinked"
logger.debug("{0:s}shrinkCmd: {1:s}".format(logStr,shrinkCmd))
if os.path.exists(self.h5File+".Shrinked"):
os.remove(self.h5File+".Shrinked")
os.system(shrinkCmd)
os.remove(self.h5File)
os.rename(self.h5File+".Shrinked",self.h5File)
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
def get(self,timeStart=None,timeEnd=None,filter_fct=None,filterAfter=True,useRawHdfAPI=False):
"""
returns df with filter_fct applied
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
dfRet=None
try:
dfLst=[]
dfLookUpTimes=self.lookUpDf
if timeStart!=None:
dfLookUpTimes=dfLookUpTimes[dfLookUpTimes['LastTime']>=timeStart] # endet nach dem Anfang oder EndeFile ist Anfang
if timeEnd!=None:
dfLookUpTimes=dfLookUpTimes[dfLookUpTimes['FirstTime']<=timeEnd] # beginnt vor dem Ende oder AnfangFile ist Ende
dfLookUpTimesIdx=dfLookUpTimes.set_index('logName')
            dfLookUpTimesIdx=dfLookUpTimesIdx.filter(regex=r'\.log$',axis=0)
h5Keys=['Log'+re.search(logFilenameHeadPattern,logFile).group(1) for logFile in dfLookUpTimesIdx.index]
logger.debug("{0:s}h5Keys used: {1:s}".format(logStr,str(h5Keys)))
if useRawHdfAPI:
with pd.HDFStore(self.h5File) as h5Store:
for h5Key in h5Keys:
logger.debug("{0:s}Get (pd.HDFStore) df with h5Key: {1:s} ...".format(logStr,h5Key))
df=h5Store[h5Key]
if not filterAfter and filter_fct != None:
logger.debug("{0:s}Apply Filter ...".format(logStr))
df=pd.DataFrame(df[df.apply(filter_fct,axis=1)].values,columns=df.columns)
dfLst.append(df)
else:
for h5Key in h5Keys:
logger.debug("{0:s}Get (read_hdf) df with h5Key: {1:s} ...".format(logStr,h5Key))
df=pd.read_hdf(self.h5File, key=h5Key)
if not filterAfter and filter_fct != None:
logger.debug("{0:s}Apply Filter ...".format(logStr))
df=pd.DataFrame(df[df.apply(filter_fct,axis=1)].values,columns=df.columns)
dfLst.append(df)
logger.debug("{0:s}{1:s}".format(logStr,'Extraction finished. Concat ...'))
dfRet=pd.concat(dfLst)
del dfLst
if filterAfter and filter_fct != None:
logger.debug("{0:s}Apply Filter ...".format(logStr))
dfRet=pd.DataFrame(dfRet[dfRet.apply(filter_fct,axis=1)].values,columns=dfRet.columns)
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return dfRet
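    # Usage sketch for get (the times are hypothetical; filter_fct is applied row-wise):
    #   df = lx.get(timeStart=pd.Timestamp('2021-04-20 10:00:00')
    #              ,timeEnd=pd.Timestamp('2021-04-20 12:00:00')
    #              ,filter_fct=lambda row: row['SubSystem'].startswith('LDS'))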
def getFromZips(self,timeStart=None,timeEnd=None,filter_fct=None,filterAfter=True,readWithDictReader=False,readWindowsLog=False):
"""
returns df from Zips
        the data is read from the zips: extract the log, parse it, delete it again
        the initialization must have been done with AppLog(zip7Files=...) since only then does self.lookUpDfZips exist
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
dfRet=None
try:
dfLst=[]
timeStart=pd.Timestamp(timeStart)
timeEnd=pd.Timestamp(timeEnd)
# zips die prozessiert werden muessen
dfLookUpZips=self.lookUpDfZips
if timeStart!=None:
dfLookUpZips=dfLookUpZips[dfLookUpZips['LastTime']>=timeStart] # endet nach dem Anfang oder EndeFile ist Anfang
if timeEnd!=None:
dfLookUpZips=dfLookUpZips[dfLookUpZips['FirstTime']<=timeEnd] # beginnt vor dem Ende oder AnfangFile ist Ende
for index, row in dfLookUpZips.iterrows():
zip7File=index
(zip7FileHead, zip7FileTail)=os.path.split(zip7File)
dTime=timeStart-row['FirstTime']
nStart = int(dTime.total_seconds()/row['TimespanPerLog'].total_seconds())
dTime=timeEnd-timeStart
nDelta = int(dTime.total_seconds()/row['TimespanPerLog'].total_seconds())+1
nEnd=nStart+nDelta
logger.debug("{0:s}zip7File: {1:s}: Start: {2:d}/{3:07d} End: {4:d}/{5:07d}".format(logStr,zip7FileTail
,nStart,nStart+row['minFileNr']
,nStart+nDelta,nStart+row['minFileNr']+nDelta))
try:
# wenn zip7File nicht existiert ...
if not os.path.exists(zip7File):
logStrFinal="{0:s}zip7File {1:s} not existing.".format(logStr,zip7File)
logger.debug(logStrFinal)
raise LxError(logStrFinal)
tmpDir=os.path.dirname(zip7File)
tmpDirContent=glob.glob(tmpDir)
with py7zr.SevenZipFile(zip7File, 'r') as zip7FileObj:
allLogFiles = zip7FileObj.getnames()
allLogFilesLen=len(allLogFiles)
logger.debug("{0:s}{1:s}: len(getnames()): {2:d}.".format(logStr,zip7FileTail,allLogFilesLen))
extDirLstTBDeleted=[]
extDirLstExistingLogged=[]
idxEff=0
for idx,logFileNameInZip in enumerate(allLogFiles):
if idx < nStart-idxEff or idx > nEnd+idxEff:
continue
logger.debug("{0:s}idx: {1:d} item: {2:s} ...".format(logStr,idx,logFileNameInZip))
# die Datei die 7Zip bei extract erzeugen wird
logFile=os.path.join(tmpDir,logFileNameInZip)
(logFileHead, logFileTail)=os.path.split(logFile)
# evtl. bezeichnet logFileNameInZip keine Datei sondern ein Verzeichnis
(name, ext)=os.path.splitext(logFileNameInZip)
if ext == '':
# a directory!
extDir=os.path.join(tmpDir,logFileNameInZip)
(extDirHead, extDirTail)=os.path.split(extDir)
if os.path.exists(extDir) and extDir in tmpDirContent:
logger.debug("{0:s}idx: {1:d} extDir: {2:s} existiert(e) bereits.".format(logStr,idx,extDirTail))
extDirLstExistingLogged.append(extDir)
elif os.path.exists(extDir) and extDir not in tmpDirContent:
logger.debug("{0:s}idx: {1:d} extDir: {2:s} existiert(e) noch nicht.".format(logStr,idx,extDirTail))
extDirLstTBDeleted.append(extDir)
elif not os.path.exists(extDir) and extDir not in tmpDirContent:
logger.debug("{0:s}idx: {1:d} extDir: {2:s} existiert(e) noch nicht.".format(logStr,idx,extDirTail))
extDirLstTBDeleted.append(extDir)
# no log file to process ...
idxEff+=1
continue
# logFileNameInZip denotes a file
if os.path.exists(logFile):
isFile = os.path.isfile(logFile)
if isFile:
logger.debug("{0:s}idx: {1:d} Log: {2:s} existiert bereits. Wird durch Extrakt ueberschrieben werden.".format(logStr,idx,logFileTail))
logFileTBDeleted=False
else:
logFileTBDeleted=False
else:
logger.debug("{0:s}idx: {1:d} Log: {2:s} existiert nicht. Wird extrahiert, dann prozessiert und dann wieder geloescht.".format(logStr,idx,logFileTail))
logFileTBDeleted=True
# extract
zip7FileObj.extract(path=tmpDir,targets=logFileNameInZip)
if os.path.exists(logFile):
pass
else:
logger.warning("{0:s}idx: {1:d} Log: {2:s} NOT extracted?! Continue with next Name in 7Zip.".format(logStr,idx,logFileTail))
# nothing to process ...
continue
# ...
if os.path.isfile(logFile):
df = self.__processALogFile(logFile=logFile,readWithDictReader=readWithDictReader,readWindowsLog=readWindowsLog)
if df is None:
logger.warning("{0:s}idx: {1:d} Log: {2:s} NOT processed?! Continue with next Name in 7Zip.".format(logStr,idx,logFileTail))
# nothing to process ...
continue
else:
if not filterAfter and filter_fct != None:
logger.debug("{0:s}Apply Filter ...".format(logStr))
df=pd.DataFrame(df[df.apply(filter_fct,axis=1)].values,columns=df.columns)
dfLst.append(df)
# ...
# delete again right away
if os.path.exists(logFile) and logFileTBDeleted:
if os.path.isfile(logFile):
os.remove(logFile)
logger.debug("{0:s}idx: {1:d} Log: {2:s} wieder geloescht.".format(logStr,idx,logFileTail))
for dirName in extDirLstTBDeleted:
if os.path.exists(dirName):
if os.path.isdir(dirName):
(dirNameHead, dirNameTail)=os.path.split(dirName)
if len(os.listdir(dirName)) == 0:
os.rmdir(dirName)
logger.debug("{0:s}dirName: {1:s} existierte nicht und wurde wieder geloescht.".format(logStr,dirNameTail))
else:
logger.info("{0:s}dirName: {1:s} existiert mit nicht leerem Inhalt?!".format(logStr,dirNameTail))
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
logger.debug("{0:s}{1:s}".format(logStr,'Extraction finished. Concat ...'))
dfRet=pd.concat(dfLst)
del dfLst
if filterAfter and filter_fct != None:
logger.debug("{0:s}Apply Filter ...".format(logStr))
dfRet=pd.DataFrame(dfRet[dfRet.apply(filter_fct,axis=1)].values,columns=dfRet.columns)
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return dfRet
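# A minimal sketch of how getFromZips maps a [timeStart, timeEnd] window onto
# member indices of a 7z archive, assuming a lookup row that carries
# 'FirstTime' and 'TimespanPerLog' as in self.lookUpDfZips; the helper name is
# hypothetical.
def _example_member_window(row, timeStart, timeEnd):
    # index of the first member that can contain timeStart
    nStart = int((timeStart - row['FirstTime']).total_seconds()
                 / row['TimespanPerLog'].total_seconds())
    # number of members needed to cover the requested window
    nDelta = int((timeEnd - timeStart).total_seconds()
                 / row['TimespanPerLog'].total_seconds()) + 1
    return nStart, nStart + nDelta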
def getTCs(self,dfID=pd.DataFrame(),timeStart=None,timeEnd=None,TCsdfOPCFill=TCsdfOPCFill,persistent=False,overwrite=True):
"""
returns TC dfs
the dfs are processed as in extractTCsToH5s; see there
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
TCKeys=['TCsdfOPC','TCsdfSirCalc','TCsdfLDSIn','TCsdfLDSRes1','TCsdfLDSRes2a','TCsdfLDSRes2b','TCsdfLDSRes2c']
if persistent:
with pd.HDFStore(self.h5File) as h5Store:
h5Keys=sorted(h5Store.keys())
#logger.debug("{0:s}h5Keys available: {1:s}".format(logStr,str(h5Keys)))
h5KeysStripped=[item.replace(h5KeySep,'') for item in h5Keys]
if set(TCKeys) & set(h5KeysStripped) == set(TCKeys):
if not overwrite:
logger.debug("{0:s}persistent: TCKeys {1:s} existieren alle bereits - return aus H5-File ...".format(logStr,str(TCKeys)))
TCsdfOPC=pd.read_hdf(self.h5File,key='TCsdfOPC')
TCsdfSirCalc=pd.read_hdf(self.h5File,key='TCsdfSirCalc')
TCsdfLDSIn=pd.read_hdf(self.h5File,key='TCsdfLDSIn')
TCsdfLDSRes1=pd.read_hdf(self.h5File,key='TCsdfLDSRes1')
TCsdfLDSRes2a=pd.read_hdf(self.h5File,key='TCsdfLDSRes2a')
TCsdfLDSRes2b=pd.read_hdf(self.h5File,key='TCsdfLDSRes2b')
TCsdfLDSRes2c=pd.read_hdf(self.h5File,key='TCsdfLDSRes2c')
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return TCsdfOPC,TCsdfSirCalc,TCsdfLDSIn,TCsdfLDSRes1,TCsdfLDSRes2a,TCsdfLDSRes2b,TCsdfLDSRes2c
else:
logger.debug("{0:s}persistent: TCKeys {1:s} existieren alle bereits - sollen aber ueberschrieben werden ...".format(logStr,str(TCKeys)))
else:
logger.debug("{0:s}persistent: TCKeys {1:s} existieren nicht (alle) ...".format(logStr,str(TCKeys)))
dfLookUpTimes=self.lookUpDf
if timeStart!=None:
dfLookUpTimes=dfLookUpTimes[dfLookUpTimes['LastTime']>=timeStart] # ends after the requested start, or the file's end is the start
if timeEnd!=None:
dfLookUpTimes=dfLookUpTimes[dfLookUpTimes['FirstTime']<=timeEnd] # begins before the requested end, or the file's start is the end
dfLookUpTimesIdx=dfLookUpTimes.set_index('logName')
dfLookUpTimesIdx=dfLookUpTimesIdx.filter(regex=r'\.log$',axis=0)
h5Keys=['Log'+re.search(logFilenameHeadPattern,logFile).group(1) for logFile in dfLookUpTimesIdx.index]
logger.debug("{0:s}h5Keys used: {1:s}".format(logStr,str(h5Keys)))
dfLst=[]
for h5Key in h5Keys:
logger.debug("{0:s}Get (read_hdf) df with h5Key: {1:s} ...".format(logStr,h5Key))
dfSingle=pd.read_hdf(self.h5File, key=h5Key)
dfSingle=dfSingle[['ID','ProcessTime','ScenTime','SubSystem','Value','Direction']]
dfSingle=dfSingle[~(dfSingle['Value'].isnull())]
dfLst.append(dfSingle)
logger.debug("{0:s}{1:s}".format(logStr,'Extraction finished. Concat ...'))
df=pd.concat(dfLst)
del dfLst
logger.debug("{0:s}{1:s}".format(logStr,'Concat finished. Filter & Pivot ...'))
if not dfID.empty:
TCsdfOPC,TCsdfSirCalc,TCsdfLDSIn,TCsdfLDSRes1,TCsdfLDSRes2=self.getTCsFromDf(df,dfID=dfID,TCsdfOPCFill=TCsdfOPCFill)
else:
TCsdfOPC,TCsdfSirCalc,TCsdfLDSIn,TCsdfLDSRes=self.getTCsFromDf(df,dfID=dfID,TCsdfOPCFill=TCsdfOPCFill)
if persistent:
logger.debug("{0:s}peristent: TCKeys {1:s} nach H5-File ...".format(logStr,str(TCKeys)))
TCsdfOPC.to_hdf(self.h5File,key='TCsdfOPC')
TCsdfSirCalc.to_hdf(self.h5File,key='TCsdfSirCalc')
TCsdfLDSIn.to_hdf(self.h5File,key='TCsdfLDSIn')
TCsdfLDSRes1.to_hdf(self.h5File,key='TCsdfLDSRes1')
TCsdfLDSRes2a.to_hdf(self.h5File,key='TCsdfLDSRes2a')
TCsdfLDSRes2b.to_hdf(self.h5File,key='TCsdfLDSRes2b')
TCsdfLDSRes2c.to_hdf(self.h5File,key='TCsdfLDSRes2c')
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
if not dfID.empty:
return TCsdfOPC,TCsdfSirCalc,TCsdfLDSIn,TCsdfLDSRes1,TCsdfLDSRes2#a,TCsdfLDSRes2b,TCsdfLDSRes2c
else:
return TCsdfOPC,TCsdfSirCalc,TCsdfLDSIn,TCsdfLDSRes
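# A minimal sketch of the persistence pattern used by getTCs, assuming an
# HDF5 file backed by PyTables; the file name and key are hypothetical.
def _example_hdf_roundtrip(h5File='example.h5'):
    import pandas as pd
    df = pd.DataFrame({'Value': [1.0, 2.0]},
                      index=pd.date_range('2021-01-01', periods=2, freq='H'))
    df.to_hdf(h5File, key='TCsExample')           # persist under a fixed key
    return pd.read_hdf(h5File, key='TCsExample')  # later calls read it back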
def getTCsFromH5s(self,timeStart=None,timeEnd=None, LDSResOnly=False, LDSResColsSpecified=None, LDSResTypeSpecified=None, timeShiftPair=None):
"""
returns several TC-dfs from TC-H5s:
TCsdfOPC,TCsdfSirCalc,TCsdfLDSIn,TCsdfLDSRes1,TCsdfLDSRes2
or
TCsdfOPC,TCsdfSirCalc,TCsdfLDSIn,TCsdfLDSRes
LDSResOnly:
TCsdfLDSRes1,TCsdfLDSRes2
or
TCsdfLDSRes
LDSResColsSpecified:
return in LDSRes df(s) only the specified cols
all cols are returned otherwise
LDSResTypeSpecified:
return TCsdfLDSRes1 (SEG) for 'SEG' or TCsdfLDSRes2 (Druck) for 'Druck'
both are returned otherwise
timeShiftPair: (period,freq): e.g. (1,'H'); if not None the index is shifted
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
try:
self.h5FileLDSRes1
Res2=True
except:
Res2=False
TCsdfOPC=pd.DataFrame()
TCsdfSirCalc=pd.DataFrame()
TCsdfLDSIn=pd.DataFrame()
if Res2:
TCsdfLDSRes1=pd.DataFrame()
TCsdfLDSRes2=pd.DataFrame()
else:
TCsdfLDSRes=pd.DataFrame()
dfLookUpTimes=self.lookUpDf
if timeStart!=None:
dfLookUpTimes=dfLookUpTimes[dfLookUpTimes['LastTime']>=timeStart] # ends after the requested start, or the file's end is the start
if timeEnd!=None:
dfLookUpTimes=dfLookUpTimes[dfLookUpTimes['FirstTime']<=timeEnd] # begins before the requested end, or the file's start is the end
dfLookUpTimesIdx=dfLookUpTimes.set_index('logName')
dfLookUpTimesIdx=dfLookUpTimesIdx.filter(regex=r'\.log$',axis=0)
h5Keys=['Log'+re.search(logFilenameHeadPattern,logFile).group(1) for logFile in dfLookUpTimesIdx.index]
logger.debug("{0:s}h5Keys used: {1:s}".format(logStr,str(h5Keys)))
h5KeysOPC=['TCsOPC'+re.search(logFilenameHeadPattern,logFile).group(1) for logFile in dfLookUpTimesIdx.index]
h5KeysSirCalc=['TCsSirCalc'+re.search(logFilenameHeadPattern,logFile).group(1) for logFile in dfLookUpTimesIdx.index]
h5KeysLDSIn=['TCsLDSIn'+re.search(logFilenameHeadPattern,logFile).group(1) for logFile in dfLookUpTimesIdx.index]
h5KeysLDSRes1=['TCsLDSRes1'+re.search(logFilenameHeadPattern,logFile).group(1) for logFile in dfLookUpTimesIdx.index]
h5KeysLDSRes2=['TCsLDSRes2'+re.search(logFilenameHeadPattern,logFile).group(1) for logFile in dfLookUpTimesIdx.index]
h5KeysLDSRes=['TCsLDSRes'+re.search(logFilenameHeadPattern,logFile).group(1) for logFile in dfLookUpTimesIdx.index]
h5KeysAll=zip(h5Keys,h5KeysOPC,h5KeysSirCalc,h5KeysLDSIn,h5KeysLDSRes1,h5KeysLDSRes2,h5KeysLDSRes)
for idx,(h5Key,h5KeyOPC,h5KeySirCalc,h5KeyLDSIn,h5KeyLDSRes1,h5KeyLDSRes2,h5KeyLDSRes) in enumerate(h5KeysAll):
if not LDSResOnly:
#logger.debug("{0:s}{1:s}".format(logStr,'TCsdfOPC ...'))
TCsdfOPC=pd.read_hdf(self.h5FileOPC,h5KeyOPC)
#logger.debug("{0:s}{1:s}".format(logStr,'TCsdfSirCalc ...'))
TCsdfSirCalc=pd.read_hdf(self.h5FileSirCalc,h5KeySirCalc)
#logger.debug("{0:s}{1:s}".format(logStr,'TCsdfLDSIn ...'))
TCsdfLDSIn=pd.read_hdf(self.h5FileLDSIn,h5KeyLDSIn)
if Res2:
if LDSResTypeSpecified == None or LDSResTypeSpecified=='SEG':
#logger.debug("{0:s}{1:s}".format(logStr,'TCsdfLDSRes1 ...'))
TCsdfLDSRes1=pd.read_hdf(self.h5FileLDSRes1,h5KeyLDSRes1)
if LDSResTypeSpecified == None or LDSResTypeSpecified=='Druck':
#logger.debug("{0:s}{1:s}".format(logStr,'TCsdfLDSRes2 ...'))
TCsdfLDSRes2=pd.read_hdf(self.h5FileLDSRes2,h5KeyLDSRes2)
else:
#logger.debug("{0:s}{1:s}".format(logStr,'TCsdfLDSRes ...'))
TCsdfLDSRes=pd.read_hdf(self.h5FileLDSRes,h5KeyLDSRes)
if LDSResColsSpecified != None:
if Res2:
if LDSResTypeSpecified == None or LDSResTypeSpecified=='SEG':
#logger.debug("{0:s}{1:s} {2:s}".format(logStr,'TCsdfLDSRes1 Filter ...',str(LDSResColsSpecified)))
TCsdfLDSRes1=TCsdfLDSRes1.filter(items=LDSResColsSpecified)
if LDSResTypeSpecified == None or LDSResTypeSpecified=='Druck':
#logger.debug("{0:s}{1:s}".format(logStr,'TCsdfLDSRes2 Filter ...'))
TCsdfLDSRes2=TCsdfLDSRes2.filter(items=LDSResColsSpecified)
else:
#logger.debug("{0:s}{1:s}".format(logStr,'TCsdfLDSRes Filter ...'))
TCsdfLDSRes=TCsdfLDSRes.filter(items=LDSResColsSpecified)
if idx==0:
if not LDSResOnly:
TCsdfOPCLst=[]
TCsdfSirCalcLst=[]
TCsdfLDSInLst=[]
if Res2:
if LDSResTypeSpecified == None or LDSResTypeSpecified=='SEG':
TCsdfLDSRes1Lst=[]
if LDSResTypeSpecified == None or LDSResTypeSpecified=='Druck':
TCsdfLDSRes2Lst=[]
else:
TCsdfLDSResLst=[]
#logger.debug("{0:s}Append ...".format(logStr))
if not LDSResOnly:
TCsdfOPCLst.append(TCsdfOPC)
TCsdfSirCalcLst.append(TCsdfSirCalc)
TCsdfLDSInLst.append(TCsdfLDSIn)
if Res2:
if LDSResTypeSpecified == None or LDSResTypeSpecified=='SEG':
TCsdfLDSRes1Lst.append(TCsdfLDSRes1)
if LDSResTypeSpecified == None or LDSResTypeSpecified=='Druck':
TCsdfLDSRes2Lst.append(TCsdfLDSRes2)
else:
TCsdfLDSResLst.append(TCsdfLDSRes)
logger.debug("{0:s}Concat ...".format(logStr))
if not LDSResOnly:
TCsdfOPC=pd.concat(TCsdfOPCLst)
TCsdfSirCalc=pd.concat(TCsdfSirCalcLst)
TCsdfLDSIn=pd.concat(TCsdfLDSInLst)
if timeShiftPair != None:
(period,freq)=timeShiftPair
logger.debug("{0:s}timeShift TCsdfOPC,TCsdfSirCalc,TCsdfLDSIn by {1:d} {2:s} ...".format(logStr,period,freq))
for df in TCsdfOPC,TCsdfSirCalc,TCsdfLDSIn:
df.index=df.index.shift(period,freq=freq)
if Res2:
if LDSResTypeSpecified == None or LDSResTypeSpecified=='SEG':
TCsdfLDSRes1=pd.concat(TCsdfLDSRes1Lst)
if LDSResTypeSpecified == None or LDSResTypeSpecified=='Druck':
TCsdfLDSRes2=pd.concat(TCsdfLDSRes2Lst)
if timeShiftPair != None:
(period,freq)=timeShiftPair
if LDSResTypeSpecified == None or LDSResTypeSpecified=='SEG':
#for df in TCsdfLDSRes1:
logger.debug("{:s}timeShift LDSRes1 by {:d} {:s} Ist: {!s:s} {!s:s} ...".format(logStr,period,freq,TCsdfLDSRes1.index[0],TCsdfLDSRes1.index[-1]))
TCsdfLDSRes1.index=TCsdfLDSRes1.index.shift(period,freq=freq)
if LDSResTypeSpecified == None or LDSResTypeSpecified=='Druck':
#for df in TCsdfLDSRes2:
logger.debug("{:s}timeShift LDSRes2 by {:d} {:s} Ist: {!s:s} {!s:s} ...".format(logStr,period,freq,TCsdfLDSRes2.index[0],TCsdfLDSRes2.index[-1]))
TCsdfLDSRes2.index=TCsdfLDSRes2.index.shift(period,freq=freq)
else:
TCsdfLDSRes=pd.concat(TCsdfLDSResLst)
if timeShiftPair != None:
(period,freq)=timeShiftPair
logger.debug("{0:s}timeShift LDSRes by {1:d} {2:s} ...".format(logStr,period,freq))
TCsdfLDSRes.index=TCsdfLDSRes.index.shift(period,freq=freq)
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
if not LDSResOnly:
if Res2:
return TCsdfOPC,TCsdfSirCalc,TCsdfLDSIn,TCsdfLDSRes1,TCsdfLDSRes2
else:
return TCsdfOPC,TCsdfSirCalc,TCsdfLDSIn,TCsdfLDSRes
else:
if Res2:
if LDSResTypeSpecified == None:
return TCsdfLDSRes1,TCsdfLDSRes2
elif LDSResTypeSpecified=='SEG':
return TCsdfLDSRes1
elif LDSResTypeSpecified=='Druck':
return TCsdfLDSRes2
else:
return TCsdfLDSRes
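# A minimal sketch of the index shift applied above when timeShiftPair is
# given: a DatetimeIndex is moved forward by `period` units of `freq`. The
# sample frame is hypothetical.
def _example_time_shift(period=1, freq='H'):
    import pandas as pd
    df = pd.DataFrame({'Value': [1.0, 2.0, 3.0]},
                      index=pd.date_range('2021-01-01 00:00', periods=3, freq='H'))
    df.index = df.index.shift(period, freq=freq)  # 00:00 -> 01:00, ...
    return df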
def __getH5Keys(self,timeStart=None,timeEnd=None):
"""
returns h5Keys (keys for the log files in h5File) and h5KeysPost (key postfixes for the dfs in all other h5 files)
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
dfLookUpTimes=self.lookUpDf
if timeStart!=None:
dfLookUpTimes=dfLookUpTimes[dfLookUpTimes['LastTime']>=timeStart] # ends after the requested start, or the file's end is the start
if timeEnd!=None:
dfLookUpTimes=dfLookUpTimes[dfLookUpTimes['FirstTime']<=timeEnd] # begins before the requested end, or the file's start is the end
dfLookUpTimesIdx=dfLookUpTimes.set_index('logName')
#dfLookUpTimesIdx.filter(regex='\.log$',axis=0)
h5Keys=['Log'+re.search(logFilenameHeadPattern,logFile).group(1) for logFile in dfLookUpTimesIdx.index]
logger.debug("{0:s}h5Keys: {1:s}".format(logStr,str(h5Keys)))
h5KeysPost=[re.search(logFilenameHeadPattern,logFile).group(1) for logFile in dfLookUpTimesIdx.index]
logger.debug("{0:s}h5KeysPost: {1:s}".format(logStr,str(h5KeysPost)))
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return h5Keys,h5KeysPost
def getCVDFromH5(self,timeStart=None,timeEnd=None,timeDelta=None,returnDfCVDataOnly=False):
"""
returns dfCVD, dfCVDataOnly
dfCVD: all rows with Subsystem CVD
dfCVDataOnly: CVs from dfCVD
timeDelta: e.g. pd.Timedelta('1 Hour'); if not None, ScenTime is shifted by + timeDelta
returns dfCVDataOnly only if returnDfCVDataOnly is True
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
dfCVD=pd.DataFrame()
dfCVDataOnly=pd.DataFrame()
try:
h5Keys,h5KeysPost=self.__getH5Keys(timeStart=timeStart,timeEnd=timeEnd)
h5KeysCVD=['CVDRes'+x for x in h5KeysPost]
if timeDelta != None:
logger.debug("{0:s}timeShift ScenTime by + {1:s} ...".format(logStr,str(timeDelta)))
for idx,h5KeyCVD in enumerate(h5KeysCVD):
dfCVD=pd.read_hdf(self.h5FileCVD,h5KeyCVD)
if timeDelta != None:
#logger.debug("{0:s}timeShift ScenTime by + {1:s} ...".format(logStr,str(timeDelta)))
dfCVD['ScenTime']=dfCVD['ScenTime']+timeDelta
dfCVDataOnly=pd.DataFrame()
dfCVDBEGIN=dfCVD[dfCVD['Remark'].str.contains('^BEGIN_OF_NEW_CONTROL_VOLUME')].copy(deep=True)
if not dfCVDBEGIN.empty:
try:
dfCVDBEGIN['ZHKNR']=dfCVDBEGIN['Value'].astype('int64')
except:
logger.debug("{:s}Parsen von Value nach ZHKNR in Zeile mit BEGIN_OF_NEW_CONTROL_VOLUME schlaegt fehlt. Vmtl. aeltere App-Log Version.".format(logStr))
dfCVDBEGIN['ZHKNR']=-1
dfCVDBEGIN_Remarks=dfCVDBEGIN['Remark'].str.split(pat=';',expand=True)
#logger.debug("{:s}dfCVDBEGIN_Remarks: {:s}".format(logStr,dfCVDBEGIN_Remarks.to_string()))
try:
dfCVDNames=dfCVDBEGIN_Remarks[dfCVDBEGIN_Remarks[0].str.contains('^BEGIN_OF_NEW_CONTROL_VOLUME')][[1]][1].str.replace('NULL','')
except:
logger.debug("{:s}Split von Remark mit ; schlug vmtl. fehl. Vmtl. aeltere App-Log Version.".format(logStr))
dfCVDBEGIN_Remarks=dfCVDBEGIN['Remark'].str.split(pat='\t',expand=True)
#logger.debug("{:s}dfCVDBEGIN_Remarks: {:s}".format(logStr,dfCVDBEGIN_Remarks.to_string()))
dfCVDNames=dfCVDBEGIN_Remarks[dfCVDBEGIN_Remarks[0].str.contains('^BEGIN_OF_NEW_CONTROL_VOLUME')][[1]][1].str.replace('NULL','')
dfCVDBEGIN=dfCVDBEGIN.join(dfCVDNames).rename(columns={1:'Name'})
dfCVDataOnly=dfCVDBEGIN[['ScenTime','ZHKNR','ID','Name']].rename(columns={'ID':'Type'}).reset_index(drop=True)
if idx==0:
if not returnDfCVDataOnly:
dfCVDLst=[]
dfCVDataOnlyLst=[]
# extend the lists
if not returnDfCVDataOnly:
dfCVDLst.append(dfCVD)
if not dfCVDataOnly.empty:
dfCVDataOnlyLst.append(dfCVDataOnly)
# concatenate the lists and post-process
if not returnDfCVDataOnly:
dfCVD=pd.concat(dfCVDLst)
dfCVD=dfCVD.reset_index(drop=True)
dfCVDataOnly=pd.concat(dfCVDataOnlyLst)
dfCVDataOnly=dfCVDataOnly.reset_index(drop=True)
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
if returnDfCVDataOnly:
return dfCVDataOnly
else:
return dfCVD,dfCVDataOnly
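# A minimal sketch of the Remark parsing above: rows that begin a new control
# volume are selected and the name is taken from the second ';'-separated
# field. The sample data is hypothetical.
def _example_parse_remarks():
    import pandas as pd
    remarks = pd.Series(['BEGIN_OF_NEW_CONTROL_VOLUME;SEG_A', 'OTHER;IGNORED'])
    parts = remarks.str.split(pat=';', expand=True)
    names = parts[parts[0].str.contains('^BEGIN_OF_NEW_CONTROL_VOLUME')][1]
    return names.str.replace('NULL', '')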
def getTCsSpecified(self,dfID=pd.DataFrame()
import numpy as np
import pandas as pd
import theano
import theano.tensor as tt
def get_variational_scores(result, config, model, inference, true_pop_size):
approx_params = list(inference.approx.shared_params.values())
distance = abs(model.pop_size - true_pop_size)/true_pop_size
input_vars = tt.dvectors(len(approx_params))
distance_sample = inference.approx.sample_node(distance, size=config['n_eval_samples'], more_replacements={ shared: var for shared, var in zip(approx_params, input_vars) })
distance_mean = tt.mean(distance_sample)
distance_function = theano.function(input_vars, distance_mean)
distances = [distance_function(*[result[var][i] for var in inference.approx.shared_params.keys()]) for i in range(len(result['i']))]
return pd.DataFrame({
'date_time': result['date_time'],
'error': np.stack(distances)
})
def get_beast_scores(result_filename, config, true_pop_size):
trace_df = pd.read_table(result_filename, parse_dates=['datetime'], comment='#')
"""Helpers for data science.
Distributed as part of ``https://github.com/chmp/misc-exp`` under the MIT
license, (c) 2017 <NAME>.
"""
import base64
import bisect
import bz2
import collections
import datetime
import enum
import functools as ft
import gzip
import hashlib
import importlib
import inspect
import io
import itertools as it
import json
import logging
import math
import os.path
import pathlib
import pickle
import re
import sys
import threading
import time
from types import ModuleType
from typing import Any, Callable, Iterable, NamedTuple, Optional, Union
try:
from sklearn.base import (
BaseEstimator,
TransformerMixin,
ClassifierMixin,
RegressorMixin,
)
except ImportError:
from ._import_compat import ( # typing: ignore
BaseEstimator,
TransformerMixin,
ClassifierMixin,
RegressorMixin,
)
_HAS_SK_LEARN = False
else:
_HAS_SK_LEARN = True
try:
from daft import PGM
except ImportError:
from ._import_compat import PGM # typing: ignore
_HAS_DAFT = False
else:
_HAS_DAFT = True
default_sequences = (tuple,)
default_mappings = (dict,)
def reload(*modules_or_module_names: Union[str, ModuleType]) -> Optional[ModuleType]:
mod = None
for module_or_module_name in modules_or_module_names:
if isinstance(module_or_module_name, str):
module_or_module_name = importlib.import_module(module_or_module_name)
mod = importlib.reload(module_or_module_name)
return mod
def import_object(obj):
def _import_obj(obj):
module, _, name = obj.partition(":")
module = importlib.import_module(module)
return getattr(module, name)
return sapply(_import_obj, obj)
def define(func):
"""Execute a function and return its result.
The idea is to use function scope to prevent pollution of global scope in
notebooks.
Usage::
@define
def foo():
return 42
assert foo == 42
"""
return func()
def cached(path: str, validate: bool = False):
"""Similar to ``define``, but cache to a file.
:param path:
the path of the cache file to use
:param validate:
if `True`, always execute the function. The loaded result will be
passed to the function, when the cache exists. In that case the
function should return the value to use. If the returned value is
not identical to the loaded value, the cache is updated with the
new value.
Usage::
@cached('./cache/result')
def dataset():
...
return result
or::
@cached('./cache/result', validate=True)
def model(result=None):
if result is not None:
# running to validate ...
return result
"""
def update_cache(result):
print("save cache", path)
with open(path, "wb") as fobj:
pickle.dump(result, fobj)
def load_cache():
print("load cache", path)
with open(path, "rb") as fobj:
return pickle.load(fobj)
def decorator(func):
if os.path.exists(path):
result = load_cache()
if not validate:
return result
else:
print("validate")
new_result = func(result)
if new_result is not result:
update_cache(new_result)
return new_result
else:
print("compute")
result = func()
update_cache(result)
return result
return decorator
class Object:
"""Dictionary-like namespace object."""
def __init__(*args, **kwargs):
self, *args = args
if len(args) > 1:
raise ValueError(
"Object(...) can be called with at " "most one positional argument"
)
elif len(args) == 0:
seed = {}
else:
seed, = args
if not isinstance(seed, collections.abc.Mapping):
seed = vars(seed)
for k, v in dict(seed, **kwargs).items():
setattr(self, k, v)
def __repr__(self):
return "Object({})".format(
", ".join("{}={!r}".format(k, v) for k, v in vars(self).items())
)
def __eq__(self, other):
return type(self) == type(other) and vars(self) == vars(other)
def __ne__(self, other):
return not (self == other)
def update_kwargs_signature(*parents, remove_kwargs=True, kwargs_name="kwargs"):
"""Replace kwargs with the parameters from parents.
If no parents are given, this function assumes the decorated object
is a class and takes the bases of the class as the parents.
For classes, the init function is updated.
Usage::
@update_kwargs_signature()
class MyObject(Base1, Base2, Base2):
def __init__(self, arg1, arg2, **kwargs):
super().__init__(**kwargs)
Inspired by `<https://www.fast.ai/2019/08/06/delegation/>`_
"""
def decorator(child, parents=parents):
if not parents:
assert inspect.isclass(child)
parents = child.__bases__
parents = [
parent.__init__ if inspect.isclass(parent) else parent for parent in parents
]
_update_kwargs_signature(
parents=parents,
child=child.__init__ if inspect.isclass(child) else child,
remove_kwargs=remove_kwargs,
kwargs_name=kwargs_name,
)
return child
return decorator
def _update_kwargs_signature(parents, child, *, remove_kwargs, kwargs_name):
"""Update the child new signature with parameters from parent / child merged."""
def is_kw_like(desc):
# assume any parameter with a default / or kw-only is "keyword-like"
return (
desc.default != inspect.Signature.empty
or desc.kind == inspect.Parameter.KEYWORD_ONLY
)
merged_parameters = inspect.signature(child).parameters.copy()
for parent in parents:
additional_parameters = {
name: desc
for name, desc in inspect.signature(parent).parameters.items()
if name not in merged_parameters and is_kw_like(desc)
}
merged_parameters.update(additional_parameters)
if remove_kwargs:
merged_parameters.pop(kwargs_name)
child.__signature__ = inspect.signature(child).replace(
parameters=merged_parameters.values()
)
class daterange:
"""A range of dates."""
start: datetime.date
end: datetime.date
step: datetime.timedelta
@classmethod
def around(cls, dt, start, end, step=None):
if not isinstance(start, datetime.timedelta):
start = datetime.timedelta(days=start)
if not isinstance(end, datetime.timedelta):
end = datetime.timedelta(days=end)
if step is None:
step = datetime.timedelta(days=1)
elif not isinstance(step, datetime.timedelta):
step = datetime.timedelta(days=step)
return cls(dt + start, dt + end, step)
def __init__(
self,
start: datetime.date,
end: datetime.date,
step: Optional[datetime.timedelta] = None,
):
if step is None:
step = datetime.timedelta(days=1)
self.start = start
self.end = end
self.step = step
def __len__(self) -> int:
return len(self._offset_range)
def __iter__(self) -> Iterable[datetime.date]:
for offset in self._offset_range:
yield self.start + datetime.timedelta(days=offset)
def __contains__(self, item: datetime.date) -> bool:
return self._offset(item) in self._offset_range
def __getitem__(self, index: int) -> datetime.date:
return self.start + datetime.timedelta(days=self._offset_range[index])
def count(self, item: datetime.date) -> int:
return 1 if (item in self) else 0
def index(self, item):
return self._offset_range.index(self._offset(item))
def _offset(self, item: datetime.date) -> int:
return (item - self.start).days
@property
def _offset_range(self) -> range:
return range(0, (self.end - self.start).days, self.step.days)
def __repr__(self):
return f"daterange({self.start}, {self.end}, {self.step})"
class undefined_meta(type):
def __repr__(self):
return "<undefined>"
class undefined(metaclass=undefined_meta):
"""Sentinel class"""
pass
def first(iterable, default=undefined):
"""Return the first item of an iterable"""
for item in iterable:
return item
return default
def last(iterable, default=undefined):
"""Return the last item of an iterable"""
item = default
for item in iterable:
pass
return item
def item(iterable, default=undefined):
"""Given a single item iterable return this item."""
found = undefined
for item in iterable:
if found is not undefined:
raise ValueError("More than one value to unpack")
found = item
if found is not undefined:
return found
if default is not undefined:
return default
raise ValueError("Need at least one item or a default")
def collect(iterable):
result = {}
for k, v in iterable:
result.setdefault(k, []).append(v)
return result
class kvpair(NamedTuple):
key: Any
value: Any
class cell:
"""No-op context manager to allow indentation of code"""
def __init__(self, name=None):
pass
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_tb):
pass
def __call__(self, func):
with self:
func()
def colorize(items, cmap=None):
"""Given an iterable, yield ``(color, item)`` pairs.
:param cmap:
if None the color cycle is used, otherwise it is interpreted as a
colormap to color the individual items.
Note: ``items`` is fully instantiated during the iteration. For any
``list`` or ``tuple`` item only its first element is used for
colomapping.
This procedure allows for example to colormap a pandas Dataframe
grouped on a number column::
for c, (_, g) in colorize(df.groupby("g"), cmap="viridis"):
...
"""
if cmap is None:
cycle = get_color_cycle()
return zip(it.cycle(cycle), items)
else:
items = list(items)
if not items:
return iter(())
keys = [item[0] if isinstance(item, (tuple, list)) else item for item in items]
return zip(colormap(keys, cmap=cmap), items)
def get_color_cycle(n=None):
"""Return the matplotlib color cycle.
:param Optional[int] n:
if given, return a list with exactly n elements formed by repeating
the color cycle as necessary.
Usage::
blue, green, red = get_color_cycle(3)
"""
import matplotlib as mpl
cycle = mpl.rcParams["axes.prop_cycle"].by_key()["color"]
if n is None:
return it.cycle(cycle)
return list(it.islice(it.cycle(cycle), n))
def mpl_set(
box=None,
xlabel=None,
ylabel=None,
title=None,
suptitle=None,
xscale=None,
yscale=None,
caption=None,
xlim=None,
ylim=None,
xticks=None,
yticks=None,
xformatter: Optional[Callable[[float, float], str]] = None,
yformatter: Optional[Callable[[float, float], str]] = None,
left=None,
top=None,
bottom=None,
right=None,
wspace=None,
hspace=None,
subplot=None,
legend=None,
colorbar=None,
invert: Optional[str] = None,
ax=None,
grid=None,
axis=None,
):
"""Set various style related options of MPL.
:param xformatter:
if given a formatter for the major x ticks. Should have the
signature ``(x_value, pos) -> label``.
:param yformatter:
See ``xformatter``.
:param invert:
if given invert the different axes. Can be `x`, `y`, or `xy`.
"""
import matplotlib.pyplot as plt
if ax is not None:
plt.sca(ax)
if box is not None:
plt.box(box)
if subplot is not None:
ax = plt.gca()
plt.subplot(*subplot)
if xlabel is not None:
plt.xlabel(xlabel)
if ylabel is not None:
plt.ylabel(ylabel)
if title is not None:
plt.title(title)
if suptitle is not None:
plt.suptitle(suptitle)
if xscale is not None:
plt.xscale(xscale)
if yscale is not None:
plt.yscale(yscale)
# TODO: handle min/max, enlarge ...
if xlim is not None:
plt.xlim(*xlim)
if ylim is not None:
plt.ylim(*ylim)
if xticks is not None:
if isinstance(xticks, tuple):
plt.xticks(*xticks)
else:
plt.xticks(xticks)
if yticks is not None:
if isinstance(yticks, tuple):
plt.yticks(*yticks)
else:
plt.yticks(yticks)
if xformatter is not None:
plt.gca().xaxis.set_major_formatter(plt.FuncFormatter(xformatter))
if yformatter is not None:
plt.gca().yaxis.set_major_formatter(plt.FuncFormatter(yformatter))
if caption is not None:
_caption(caption)
subplot_kwargs = _dict_of_optionals(
left=left, right=right, bottom=bottom, top=top, wspace=wspace, hspace=hspace
)
if subplot_kwargs:
plt.subplots_adjust(**subplot_kwargs)
if legend is not None and legend is not False:
if legend is True:
plt.legend(loc="best")
elif isinstance(legend, str):
plt.legend(loc=legend)
else:
plt.legend(**legend)
if subplot is not None:
plt.sca(ax)
if colorbar is True:
plt.colorbar()
if invert is not None:
if "x" in invert:
plt.gca().invert_xaxis()
if "y" in invert:
plt.gca().invert_yaxis()
if grid is not None:
if not isinstance(grid, list):
grid = [grid]
for spec in grid:
if isinstance(spec, bool):
b, which, axis = spec, "major", "both"
elif isinstance(spec, str):
b, which, axis = True, "major", spec
elif isinstance(spec, tuple) and len(spec) == 2:
b, which, axis = True, spec[0], spec[1]
elif isinstance(spec, tuple):
b, which, axis = spec
else:
raise RuntimeError()
plt.grid(b, which, axis)
if axis is not None and axis is not True:
if axis is False:
axis = "off"
plt.axis(axis)
class mpl_axis:
def __init__(self, ax=None, **kwargs):
self.ax = ax
self._prev_ax = None
self.kwargs = kwargs
def __enter__(self):
import matplotlib.pyplot as plt
if plt.get_fignums():
self._prev_ax = plt.gca()
if self.ax is None:
_, self.ax = plt.subplots()
plt.sca(self.ax)
return self.ax
def __exit__(self, exc_type, exc_value, exc_tb):
import matplotlib.pyplot as plt
mpl_set(**self.kwargs)
if self._prev_ax is not None:
plt.sca(self._prev_ax)
# fake the mpl_axis signature ...
# TODO: make this a general utility function?
@define
def _():
import collections
import inspect
wrapper_signature = inspect.signature(mpl_axis)
base_signature = inspect.signature(mpl_set)
parameters = collections.OrderedDict()
parameters["ax"] = wrapper_signature.parameters["ax"].replace(
kind=inspect.Parameter.POSITIONAL_ONLY
)
parameters.update(base_signature.parameters)
mpl_axis.__signature__ = wrapper_signature.replace(parameters=parameters.values())
def diagonal(**kwargs):
"""Draw a diagonal line in the current axis."""
import matplotlib.pyplot as plt
xmin, xmax = plt.xlim()
ymin, ymax = plt.ylim()
vmin = max(xmin, ymin)
vmax = min(xmax, ymax)
plt.plot([vmin, vmax], [vmin, vmax], **kwargs)
def qlineplot(*, x, y, hue, data, ci=0.95):
"""Plot median as line, quantiles as shading.
"""
import matplotlib.pyplot as plt
agg_data = data.groupby([x, hue])[y].quantile([1 - ci, 0.5, ci]).unstack()
hue_values = data[hue].unique()
for color, hue_value in colorize(hue_values):
subset = agg_data.xs(hue_value, level=hue)
plt.fill_between(subset.index, subset.iloc[:, 0], subset.iloc[:, 2], alpha=0.2)
for color, hue_value in colorize(hue_values):
subset = agg_data.xs(hue_value, level=hue)
plt.plot(subset.index, subset.iloc[:, 1], label=hue_value, marker=".")
plt.legend(loc="best")
plt.xlabel(x)
plt.ylabel(y)
class pgm:
"""Wrapper around :class:`daft.PGM` to allow fluid call chains.
Usage::
(
pgm(observed_style="inner", ax=ax1)
.node("z", r"$Z$", 1.5, 2)
.node("x", r"$X$", 1, 1)
.node("y", r"$Y$", 2, 1)
.edge("z", "x")
.edge("x", "y")
.edge("z", "y")
.render(xlim=(1, 5), ylim=(1, 5))
)
To annotate a node use::
.annotate(node_name, annotation_text)
Nodes can also be created without explicit lables (in which case the node
name is used)::
.node("z", 1, 1)
node("z", "label", 1, 1)
"""
def __init__(self, *, ax=None, nodes=(), edges=(), annotations=(), **kwargs):
if not _HAS_DAFT:
raise RuntimeError("daft is required for pgm support.")
self.ax = ax
self.daft_kwargs = kwargs
self._nodes = list(nodes)
self._edges = list(edges)
self._annotations = list(annotations)
def update(self, nodes=None, edges=None, annotations=None):
"""Replace a full set of features."""
if nodes is None:
nodes = self._nodes
if edges is None:
edges = self._edges
if annotations is None:
annotations = self._annotations
return type(self)(
nodes=nodes,
edges=edges,
annotations=annotations,
ax=self.ax,
**self.daft_kwargs,
)
def node(self, *args, edgecolor=None, facecolor=None, **kwargs):
if edgecolor is not None:
kwargs.setdefault("plot_params", {}).update(edgecolor=edgecolor)
if facecolor is not None:
kwargs.setdefault("plot_params", {}).update(facecolor=facecolor)
node = Object(kwargs=kwargs)
if len(args) == 3:
node.name, node.x, node.y = args
node.label = node.name
else:
node.name, node.label, node.x, node.y = args
return self.update(nodes=self._nodes + [node])
def edge(self, from_node, to_node, **kwargs):
edge = Object(from_node=from_node, to_node=to_node, kwargs=kwargs)
return self.update(edges=self._edges + [edge])
def edges(self, from_nodes, to_nodes, **kwargs):
current = self
for from_node, to_node in it.product(from_nodes, to_nodes):
current = current.edge(from_node, to_node, **kwargs)
return current
def remove(self, incoming=(), outgoing=()):
"""Remove edges that point in or out of a the specified nodes.
"""
incoming = set(incoming)
outgoing = set(outgoing)
edges_to_keep = [
edge
for edge in self._edges
if (edge.from_node not in outgoing and edge.to_node not in incoming)
]
return self.update(edges=edges_to_keep)
def annotate(self, node, text):
annotation = Object(node=node, text=text)
return self.update(annotations=self._annotations + [annotation])
def render(self, ax=None, axis=False, xlim=None, ylim=None, **kwargs):
"""Render the figure.
:param ax:
the axes to draw into. If not given, the axis specified in
`__init__` or the current axes is used.
:param xlim:
the xlim to use. If not given, it is determined from the data.
:param ylim:
the ylim to use. If not given, it is determined from the data.
:param kwargs:
keyword arguments forward to mpl set.
:returns:
the `pgm` object.
"""
import daft
import matplotlib.pyplot as plt
if ax is None:
if self.ax is not None:
ax = self.ax
else:
ax = plt.gca()
pgm = _PGM(ax=ax)
for node in self._nodes:
daft_node = daft.Node(node.name, node.label, node.x, node.y, **node.kwargs)
pgm.add_node(daft_node)
for edge in self._edges:
pgm.add_edge(edge.from_node, edge.to_node, **edge.kwargs)
for annot in self._annotations:
self._render_annotation(pgm, annot)
if xlim is None or ylim is None:
data_xlim, data_ylim = pgm.get_limits()
if xlim is None:
xlim = expand(*data_xlim, 0.10)
if ylim is None:
ylim = expand(*data_ylim, 0.10)
pgm.render()
mpl_set(**kwargs, axis=axis, xlim=xlim, ylim=ylim, ax=ax)
return pgm
def _render_annotation(self, pgm, annot):
extent = pgm.get_node_extent(annot.node)
pgm._ctx._ax.text(
extent.x, extent.y - 0.5 * extent.height, annot.text, va="top", ha="center"
)
def _ipython_display_(self):
self.render()
class _PGM(PGM):
def __init__(self, *, ax=None, **kwargs):
super().__init__([1.0, 1.0], origin=[0.0, 0.0], **kwargs)
self._ctx._ax = ax
self._ctx._figure = ax.get_figure()
def get_node_extent(self, node):
# TODO: incorporate the complete logic of daft?
ctx = self._ctx
if isinstance(node, str):
node = self._nodes[node]
aspect = node.aspect if node.aspect is not None else ctx.aspect
height = node.scale * ctx.node_unit
width = aspect * height
center_x = ctx.grid_unit * node.x
center_y = ctx.grid_unit * node.y
return Object(
x=center_x,
y=center_y,
width=width,
height=height,
xmin=center_x - 0.5 * width,
xmax=center_x + 0.5 * width,
ymin=center_y - 0.5 * height,
ymax=center_y + 0.5 * height,
)
def get_limits(self):
nodes = list(self._nodes.values())
if not nodes:
return (0, 1), (0, 1)
extent = self.get_node_extent(nodes[0])
xmin = extent.xmin
xmax = extent.xmax
ymin = extent.ymin
ymax = extent.ymax
for node in nodes[1:]:
extent = self.get_node_extent(node)
xmin = min(xmin, extent.xmin)
xmax = max(xmax, extent.xmax)
ymin = min(ymin, extent.ymin)
ymax = max(ymax, extent.ymax)
return (xmin, xmax), (ymin, ymax)
def edges(x):
"""Create edges for use with pcolor.
Usage::
assert x.size == v.shape[1]
assert y.size == v.shape[0]
pcolor(edges(x), edges(y), v)
"""
import numpy as np
centers = 0.5 * (x[1:] + x[:-1])
return np.concatenate(
([x[0] - 0.5 * (x[1] - x[0])], centers, [x[-1] + 0.5 * (x[-1] - x[-2])])
)
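# A small usage sketch for edges(): the bin edges are the midpoints between
# the given centers, extended by half a step at both ends.
def _example_edges():
    import numpy as np
    x = np.array([0.0, 1.0, 2.0])
    assert np.allclose(edges(x), [-0.5, 0.5, 1.5, 2.5])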
def center(u):
"""Compute the center between edges."""
return 0.5 * (u[1:] + u[:-1])
def caption(s, size=13, strip=True):
"""Add captions to matplotlib graphs."""
import matplotlib.pyplot as plt
if strip:
s = s.splitlines()
s = (i.strip() for i in s)
s = (i for i in s if i)
s = " ".join(s)
plt.figtext(0.5, 0, s, wrap=True, size=size, va="bottom", ha="center")
_caption = caption
def change_vspan(
x,
y,
*,
data=None,
color=("w", "0.90"),
transform_x=None,
transform_y=None,
skip_nan=True,
**kwargs,
):
"""Plot changes in a quantity with vspans.
"""
import matplotlib.pyplot as plt
import numpy as np
x, y = _prepare_xy(
x,
y,
data=data,
transform_x=transform_x,
transform_y=transform_y,
skip_nan=skip_nan,
)
if not isinstance(color, (tuple, list)):
color = [color]
changes = _find_changes(y)
changes = np.concatenate([[0], changes, [len(y) - 1]])
for start, end, c in zip(changes[:-1], changes[1:], it.cycle(color)):
plt.axvspan(x[start], x[end], color=c, **kwargs)
def change_plot(
x, y, *, data=None, transform_x=None, transform_y=None, skip_nan=True, **kwargs
):
"""Plot changes in a quantity with pyplot's standard plot function.
"""
import matplotlib.pyplot as plt
x, y = _prepare_xy(
x,
y,
data=data,
transform_x=transform_x,
transform_y=transform_y,
skip_nan=skip_nan,
)
changes = _find_changes(y)
x = x[changes]
y = y[changes]
plt.plot(x, y, **kwargs)
def axtext(*args, **kwargs):
"""Add a text in axes coordinates (similar ``figtext``).
Usage::
axtext(0, 0, 'text')
"""
import matplotlib.pyplot as plt
kwargs.update(transform=plt.gca().transAxes)
plt.text(*args, **kwargs)
def plot_gaussian_contour(x, y, *, data=None, q=(0.99,), ax=None, **kwargs):
"""Plot isocontours of the maximum likelihood Gaussian for ``x, y``.
:param q:
the quantiles to show.
"""
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import scipy.special
if ax is not None:
plt.sca(ax)
kwargs.setdefault("facecolor", "none")
kwargs.setdefault("edgecolor", "k")
q = np.atleast_1d(q)
if data is not None:
x = data[x]
y = data[y]
x = np.asarray(x)
y = np.asarray(y)
mx = np.mean(x)
my = np.mean(y)
xx = np.mean((x - mx) * (x - mx))
yy = np.mean((y - my) * (y - my))
xy = np.mean((x - mx) * (y - my))
cov = np.asarray([[xx, xy], [xy, yy]])
eigvals, eigvects = np.linalg.eig(cov)
dx, dy = eigvects[:, 0]
angle = math.atan2(dy, dx) / (2 * math.pi) * 360
for _q in q:
s = (2 ** 0.5) * scipy.special.erfinv(_q)
artist = mpl.patches.Ellipse((mx, my), *(s * eigvals), angle, **kwargs)
plt.gca().add_artist(artist)
return artist
def _prepare_xy(x, y, data=None, transform_x=None, transform_y=None, skip_nan=True):
if data is not None:
x = data[x]
y = data[y]
x, y = _optional_skip_nan(x, y, skip_nan=skip_nan)
if transform_x is not None:
x = transform_x(x)
if transform_y is not None:
y = transform_y(y)
return x, y
def _find_changes(v):
import numpy as np
changes, = np.nonzero(np.diff(v))
changes = changes + 1
return changes
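# A small usage sketch for _find_changes(): it returns the indices at which a
# value differs from its predecessor.
def _example_find_changes():
    import numpy as np
    assert np.array_equal(_find_changes([1, 1, 2, 2, 3]), [2, 4])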
def _optional_skip_nan(x, y, skip_nan=True):
import numpy as np
if not skip_nan:
return x, y
s = np.isfinite(y)
return x[s], y[s]
def _dict_of_optionals(**kwargs):
return {k: v for k, v in kwargs.items() if v is not None}
@ft.singledispatch
def get_children(est):
return []
def to_markdown(df, index=False):
"""Return a string containg the markdown of the table.
Depends on the ``tabulate`` dependency.
"""
from tabulate import tabulate
return tabulate(df, tablefmt="pipe", headers="keys", showindex=index)
def index_query(obj, expression, scalar=False):
"""Execute a query expression on the index and return matching rows.
:param scalar:
if True, return only the first item. Setting ``scalar=True``
raises an error if the resulting object has have more than one
entry.
"""
res = obj.loc[obj.index.to_frame().query(expression).index]
if scalar:
assert res.size == 1
return res.iloc[0]
return res
def fix_categories(
s, categories=None, other_category=None, inplace=False, groups=None, ordered=False
):
"""Fix the categories of a categorical series.
:param pd.Series s:
the series to normalize
:param Optional[Iterable[Any]] categories:
the categories to keep. The result will have categories in the
iteration order of this parameter. If not given but ``groups`` is
passed, the keys of ``groups`` will be used, otherwise the existing
categories of ``s`` will be used.
:param Optional[Any] other_category:
all categories to be removed wil be mapped to this value, unless they
are specified specified by the ``groups`` parameter. If given and not
included in the categories, it is appended to the given categories.
For a custom order, ensure it is included in ``categories``.
:param bool inplace:
if True the series will be modified in place.
:param Optional[Mapping[Any,Iterable[Any]]] groups:
if given, specifies which categories to replace by which in the form
of ``{replacement: list_of_categories_to_replace}``.
:param bool ordered:
if True the resulting series will have ordered categories.
"""
import pandas.api.types as pd_types
if not inplace:
s = s.copy()
if not pd_types.is_categorical(s):
if inplace:
raise ValueError("cannot change the type inplace")
s = s.astype("category")
if categories is None:
if groups is not None:
categories = list(groups.keys())
else:
categories = list(s.cat.categories)
categories = list(categories)
inital_categories = set(s.cat.categories)
if other_category is not None and other_category not in categories:
categories = categories + [other_category]
additions = [c for c in categories if c not in inital_categories]
removals = [c for c in inital_categories if c not in categories]
if groups is None:
groups = {}
else:
groups = {k: set(v) for k, v in groups.items()}
remapped = {c for group in groups.values() for c in group}
dangling_categories = {*removals} - {*remapped}
if dangling_categories:
if other_category is None:
raise ValueError(
"dangling categories %s found, need other category to assign"
% dangling_categories
)
groups.setdefault(other_category, set()).update(set(removals) - set(remapped))
if additions:
s.cat.add_categories(additions, inplace=True)
for replacement, group in groups.items():
s[s.isin(group)] = replacement
if removals:
s.cat.remove_categories(removals, inplace=True)
s.cat.reorder_categories(categories, inplace=True, ordered=ordered)
return s
def find_high_frequency_categories(s, min_frequency=0.02, n_max=None):
"""Find categories with high frequency.
:param float min_frequency:
the minimum frequency to keep
:param Optional[int] n_max:
if given keep at most ``n_max`` categories. If more are present after
filtering for minimum frequency, keep the highest ``n_max`` frequency
columns.
"""
assert 0.0 < min_frequency < 1.0
s = s.value_counts(normalize=True).pipe(lambda s: s[s > min_frequency])
if n_max is None:
return list(s.index)
if len(s) <= n_max:
return s
return list(s.sort_values(ascending=False).iloc[:n_max].index)
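# A small usage sketch for find_high_frequency_categories(); the sample data
# is hypothetical.
def _example_high_frequency_categories():
    import pandas as pd
    s = pd.Series(['a'] * 60 + ['b'] * 39 + ['c'])
    # 'c' occurs with frequency 0.01 < 0.02 and is therefore dropped
    return find_high_frequency_categories(s, min_frequency=0.02)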
def as_frame(**kwargs):
import pandas as pd
return pd.DataFrame().assign(**kwargs)
def singledispatch_on(idx):
"""Helper to dispatch on any argument, not only the first one."""
# It works by wrapping the function to include the relevant
# argument as first argument as well.
def decorator(func):
@ft.wraps(func)
def wrapper(*args, **kwargs):
dispatch_obj = args[idx]
return dispatcher(dispatch_obj, *args, **kwargs)
def make_call_impl(func):
@ft.wraps(func)
def impl(*args, **kwargs):
_, *args = args
return func(*args, **kwargs)
return impl
def register(type):
def decorator(func):
dispatcher.register(type)(make_call_impl(func))
return func
return decorator
wrapper.register = register
dispatcher = ft.singledispatch(make_call_impl(func))
return wrapper
return decorator
def setdefaultattr(obj, name, value):
"""``dict.setdefault`` for attributes"""
if not hasattr(obj, name):
setattr(obj, name, value)
return getattr(obj, name)
# keep for backwards compat
def sapply(func, obj, sequences=default_sequences, mappings=default_mappings):
return smap(func, obj, sequences=sequences, mappings=mappings)
def szip(
iterable_of_objects,
sequences=default_sequences,
mappings=default_mappings,
return_schema=False,
):
"""Zip but for deeply nested objects.
For a list of nested set of objects return a nested set of list.
"""
iterable_of_objects = iter(iterable_of_objects)
try:
first = next(iterable_of_objects)
except StopIteration:
return None
# build a scaffold into which the results are appended
# NOTE: the target lists must not be confused with the structure, use a
# schema object as an unambiguous marker.
schema = smap(lambda _: None, first, sequences=sequences, mappings=mappings)
target = smap(lambda _: [], schema, sequences=sequences, mappings=mappings)
for obj in it.chain([first], iterable_of_objects):
smap(
lambda _, t, o: t.append(o),
schema,
target,
obj,
sequences=sequences,
mappings=mappings,
)
return target if return_schema is False else (target, schema)
def flatten_with_index(obj, sequences=default_sequences, mappings=default_mappings):
counter = iter(it.count())
flat = []
def _build(item):
flat.append(item)
return next(counter)
index = smap(_build, obj, sequences=sequences, mappings=mappings)
return index, flat
def unflatten(index, obj, sequences=default_sequences, mappings=default_mappings):
obj = list(obj)
return smap(lambda idx: obj[idx], index, sequences=sequences, mappings=mappings)
def smap(func, arg, *args, sequences=default_sequences, mappings=default_mappings):
"""A structured version of map.
The structure is taken from the first arguments.
"""
return _smap(func, arg, *args, path="$", sequences=sequences, mappings=mappings)
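# A small usage sketch for smap(): the structure (here a dict of tuples) is
# taken from the first argument and the function is applied leaf-wise.
def _example_smap():
    result = smap(lambda a, b: a + b, {"x": (1, 2)}, {"x": (10, 20)})
    assert result == {"x": (11, 22)}
    return result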
def _smap(
func, arg, *args, path, sequences=default_sequences, mappings=default_mappings
):
try:
if isinstance(arg, sequences):
return type(arg)(
_smap(
func,
*co,
path=f"{path}.{idx}",
sequences=sequences,
mappings=mappings,
)
for idx, *co in zip(it.count(), arg, *args)
)
elif isinstance(arg, mappings):
return type(arg)(
(
k,
_smap(
func,
arg[k],
*(obj[k] for obj in args),
path=f"{path}.k",
sequences=sequences,
mappings=mappings,
),
)
for k in arg
)
else:
return func(arg, *args)
# pass through any exceptions in smap without further annotations
except SApplyError:
raise
except Exception as e:
raise SApplyError(f"Error in sappend at {path}: {e}") from e
def copy_structure(
template, obj, sequences=default_sequences, mappings=default_mappings
):
"""Arrange ``obj`` into the structure of ``template``.
:param template:
the object of which top copy the structure
:param obj:
the object which to arrange into the structure. If it is
already structured, the template structure and its structure
must be the same or a value error is raised
"""
template_schema = smap(
lambda _: None, template, sequences=sequences, mappings=mappings
)
obj_schema = smap(lambda _: None, obj, sequences=sequences, mappings=mappings)
if obj_schema is not None:
if obj_schema != template_schema:
raise ValueError("Misaligned structures")
return obj
return smap(lambda _: obj, template_schema, sequences=sequences, mappings=mappings)
def assert_has_schema(
nested_obj, expected_schema, sequences=default_sequences, mappings=default_mappings
):
actual_schema = smap(
lambda _: None, nested_obj, sequences=sequences, mappings=mappings
)
if actual_schema != expected_schema:
raise AssertionError(
f"Schemas do not match: {actual_schema} != {expected_schema}"
)
class SApplyError(Exception):
pass
def json_numpy_default(obj):
"""A default implementation for ``json.dump`` that deals with numpy datatypes.
"""
import numpy as np
int_types = (
np.int0,
np.int8,
np.int16,
np.int32,
np.int64,
np.uint0,
np.uint8,
np.uint16,
np.uint32,
np.uint64,
)
float_types = (np.float16, np.float32, np.float64, np.float128)
if isinstance(obj, int_types):
return int(obj)
elif isinstance(obj, float_types):
return float(obj)
elif isinstance(obj, np.ndarray):
return obj.tolist()
raise TypeError(f"Cannot convert type of {type(obj).__name__}")
def piecewise_linear(x, y, xi):
return _piecewise(_linear_interpolator, x, y, xi)
def piecewise_logarithmic(x, y, xi=None):
return _piecewise(_logarithmic_interpolator, x, y, xi)
def _linear_interpolator(u, y0, y1):
return y0 + u * (y1 - y0)
def _logarithmic_interpolator(u, y0, y1):
return (y0 ** (1 - u)) * (y1 ** u)
def _piecewise(interpolator, x, y, xi):
assert len(x) == len(y)
interval = bisect.bisect_right(x, xi)
if interval == 0:
return y[0]
if interval >= len(x):
return y[-1]
u = (xi - x[interval - 1]) / (x[interval] - x[interval - 1])
return interpolator(u, y[interval - 1], y[interval])
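# A small usage sketch for piecewise_linear(): values outside the x-range are
# clamped to the boundary values, values inside are interpolated.
def _example_piecewise_linear():
    x = [0.0, 1.0, 2.0]
    y = [0.0, 10.0, 20.0]
    assert piecewise_linear(x, y, -1.0) == 0.0
    assert piecewise_linear(x, y, 0.5) == 5.0
    assert piecewise_linear(x, y, 5.0) == 20.0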
bg_instances = {}
def bgloop(tag, *iterables, runner=None):
"""Run a loop in a background thread."""
if runner is None:
runner = run_thread
def decorator(func):
if tag in bg_instances and bg_instances[tag].running:
raise RuntimeError("Already running loop")
bg_instances[tag] = Object()
bg_instances[tag].running = True
bg_instances[tag].handle = runner(_run_loop, tag, func, iterables)
return func
def _run_loop(tag, func, iterables):
try:
bg_instances[tag].running = True
for loop, item in Loop.over(
fast_product(*iterables), length=product_len(*iterables)
):
if not bg_instances[tag].running:
break
func(loop, *item)
finally:
bg_instances[tag].running = False
return decorator
def cancel(tag):
if tag in bg_instances:
bg_instances[tag].running = False
def wait(tag):
if tag in bg_instances and bg_instances[tag].handle is not None:
bg_instances[tag].handle.join()
def run_direct(*args, **kwargs):
func, *args = args
func(*args, **kwargs)
def run_thread(*args, **kwargs):
func, *args = args
t = threading.Thread(target=func, args=args, kwargs=kwargs)
t.start()
return t
def product_len(*iterables):
if not iterables:
return 1
head, *tail = iterables
return len(head) * product_len(*tail)
def fast_product(*iterables):
if not iterables:
yield ()
return
head, *tail = iterables
for i in head:
for j in fast_product(*tail):
yield (i,) + j
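# A small usage sketch for fast_product() and product_len(): the generator
# yields the cartesian product lazily, product_len() gives its length without
# materializing it.
def _example_fast_product():
    items = list(fast_product([1, 2], ["a"]))
    assert items == [(1, "a"), (2, "a")]
    assert product_len([1, 2], ["a"]) == len(items)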
class Display:
"""An interactive display for use in background tasks."""
def __init__(self, obj=None):
from IPython.core.display import display
self.handle = display(obj, display_id=True)
def update(self, obj):
self.handle.update(obj)
def print(self, *args, sep=" "):
from IPython.core.display import Pretty
self.handle.update(Pretty(sep.join(str(a) for a in args)))
def figure(self):
from IPython.core.display import Image
import matplotlib.pyplot as plt
with io.BytesIO() as fobj:
plt.savefig(fobj, format="png")
plt.close()
self.handle.update(Image(fobj.getvalue(), format="png"))
def pd_has_ordered_assign():
import pandas as pd
py_major, py_minor, *_ = sys.version_info
pd_major, pd_minor, *_ = pd.__version__.split(".")
pd_major = int(pd_major)
pd_minor = int(pd_minor)
return (py_major, py_minor) >= (3, 6) and (pd_major, pd_minor) >= (0, 23)
def timed(tag=None, level=logging.INFO):
"""Time a codeblock and log the result.
Usage::
with timed():
long_running_operation()
:param any tag:
an object used to identify the timed code block. It is printed with
the time taken.
"""
return _TimedContext(
message=("[TIMING] %s s" if tag is None else "[TIMING] {} %s s".format(tag)),
logger=_get_caller_logger(),
level=level,
)
# use a custom contextmanager to control stack level for _get_caller_logger
class _TimedContext(object):
def __init__(self, logger, message, level):
self.logger = logger
self.message = message
self.level = level
def __enter__(self):
self.start = time.time()
def __exit__(self, exc_type, exc_val, exc_tb):
end = time.time()
self.logger.log(self.level, self.message, end - self.start)
def _get_caller_logger(depth=2):
stack = inspect.stack()
if depth >= len(stack): # pragma: no cover
return logging.getLogger(__name__)
# NOTE: python2 returns raw tuples, index 0 is the frame
frame = stack[depth][0]
name = frame.f_globals.get("__name__")
return logging.getLogger(name)
def find_categorical_columns(df):
"""Find all categorical columns in the given dataframe.
"""
import pandas.api.types as pd_types
return [k for k, dtype in df.dtypes.items() if pd_types.is_categorical_dtype(dtype)]
def filter_low_frequency_categories(
columns=None, min_frequency=0.02, other_category=None, n_max=None
):
"""Build a transformer to filter low frequency categories.
Usage::
pipeline = build_pipeline[
categories=filter_low_frequency_categories(),
predict=lgb.LGBMClassifier(),
)
"""
if columns is not None and not isinstance(columns, (list, tuple)):
columns = [columns]
return FilterLowFrequencyTransfomer(columns, min_frequency, other_category, n_max)
class FilterLowFrequencyTransfomer(BaseEstimator, TransformerMixin):
def __init__(
self, columns=None, min_frequency=0.02, other_category="other", n_max=None
):
self.columns = columns
self.min_frequency = min_frequency
self.other_category = other_category
self.n_max = n_max
self._columns = columns
self._to_keep = {}
def fit(self, df, y=None):
if self._columns is None:
self._columns = find_categorical_columns(df)
for col in self._columns:
try:
to_keep = find_high_frequency_categories(
df[col],
min_frequency=self._get("min_frequency", col),
n_max=self._get("n_max", col),
)
except Exception as e:
raise RuntimeError(
f"cannot determine high frequency categories for {col} due to {e}"
)
self._to_keep[col] = to_keep
return self
def transform(self, df, y=None):
for col in self._columns:
df = df.assign(
**{
col: fix_categories(
df[col],
self._to_keep[col],
other_category=self._get("other_category", col),
)
}
)
return df
def _get(self, key, col):
var = getattr(self, key)
return var[col] if isinstance(var, dict) else var
def column_transform(*args, **kwargs):
"""Build a transformer for a list of columns.
Usage::
pipeline = build_pipeline(
transform=column_transform(['a', 'b'], np.abs),
classifier=sk_ensemble.GradientBoostingClassifier(),
])
Or::
pipeline = build_pipeline(
transform=column_transform(
a=np.abs,
b=op.pos,
),
classifier=sk_ensemble.GradientBoostingClassifier(),
)
"""
if not args:
columns = kwargs
else:
columns, func, *args = args
if not isinstance(columns, (list, tuple)):
columns = [columns]
func = ft.partial(func, *args, **kwargs)
columns = {c: func for c in columns}
return transform(_column_transform, columns=columns)
def _column_transform(x, columns):
if not hasattr(x, "assign"):
raise RuntimeError("can only transform objects with an assign method.")
for c, func in columns.items():
x = x.assign(**{c: func(x[c])})
return x
def build_pipeline(**kwargs):
"""Build a pipeline from named steps.
The order of the keyword arguments is retained. Note, this functionality
requires python ``>= 3.6``.
Usage::
pipeline = build_pipeline(
transform=...,
predict=...,
)
"""
import sklearn.pipeline as sk_pipeline
if sys.version_info[:2] < (3, 6):
raise RuntimeError("pipeline factory requires deterministic kwarg order")
return sk_pipeline.Pipeline(list(kwargs.items()))
def transform(*args, **kwargs):
"""Build a function transformer with args / kwargs bound.
Usage::
pipeline = build_pipeline(
transform=transform(np.abs)),
classifier=sk_ensemble.GradientBoostingClassifier()),
)
"""
func, *args = args
return FuncTransformer(ft.partial(func, *args, **kwargs))
class FuncTransformer(TransformerMixin, BaseEstimator):
"""Simple **non-validating** function transformer.
:param callable func:
the function to apply on transform
"""
def __init__(self, func):
self.func = func
def fit(self, x, y=None):
return self
def partial_fit(self, x, y=None):
return self
def transform(self, x):
return self.func(x)
class FuncClassifier(BaseEstimator, ClassifierMixin):
def __init__(self, func):
self.func = func
def fit(self, df, y=None):
return self
def predict_proba(self, df):
return self.func(df)
def predict(self, df):
import numpy as np
return np.argmax(self.predict_proba(df), axis=1)
class FuncRegressor(BaseEstimator, RegressorMixin):
def __init__(self, func):
self.func = func
def fit(self, df, y=None):
return self
def predict(self, df):
return self.func(df)
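# Hypothetical usage sketch (added for illustration; not part of the original
# module): FuncClassifier / FuncRegressor wrap plain callables so they can sit
# at the end of an sklearn pipeline. The functions below are made up.
def _example_func_estimators():
    import numpy as np
    import pandas as pd
    df = pd.DataFrame({"x": [0.1, 0.9, 0.4, 0.8]})
    # classifier: the callable returns one probability column per class
    clf = FuncClassifier(lambda d: np.column_stack([1 - d["x"], d["x"]]))
    labels = clf.fit(df).predict(df)
    # regressor: the callable returns one prediction per row
    reg = FuncRegressor(lambda d: 2.0 * d["x"].to_numpy())
    preds = reg.fit(df).predict(df)
    return labels, preds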
class DataFrameEstimator(BaseEstimator):
"""Add support for dataframe use to sklearn estimators.
"""
def __init__(self, est):
self.est = est
def fit(self, x, y=None, **kwargs):
import numpy as np
x = x.reset_index(drop=True)
y = np.asarray(x[y])
self.est.fit(x, y, **kwargs)
return self
def predict(self, x, y=None):
x = x.reset_index(drop=True)
return self.est.predict(x)
def predict_proba(self, x, y=None):
x = x.reset_index(drop=True)
return self.est.predict_proba(x)
@get_children.register(DataFrameEstimator)
def df_estimator(est):
return [(0, est.est)]
class OneHotEncoder(BaseEstimator, TransformerMixin):
def __init__(self, columns=None):
self.columns = columns
self.columns_ = columns
self.levels_ = collections.OrderedDict()
def fit(self, x, y=None):
if self.columns_ is None:
self.columns_ = find_categorical_columns(x)
for col in self.columns_:
try:
self.levels_[col] = multi_type_sorted(x[col].unique())
except Exception as e:
raise RuntimeError(f"cannot fit {col}") from e
return self
def transform(self, x, y=None):
for col in self.columns_:
try:
assignments = {}
for level in self.levels_[col]:
assignments[f"{col}_{level}"] = (x[col] == level).astype(float)
x = x.drop([col], axis=1).assign(**assignments)
except Exception as e:
raise RuntimeError(f"cannot transform {col}") from e
return x
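# Usage sketch (added for illustration; not part of the original module): the
# encoder expands each listed categorical column into one indicator per level.
def _example_one_hot_encoder():
    import pandas as pd
    df = pd.DataFrame(
        {"size": pd.Categorical(["S", "M", "L", "M"]), "price": [1.0, 2.0, 3.0, 2.5]}
    )
    encoder = OneHotEncoder(columns=["size"])
    # result keeps "price" and adds float columns size_S, size_M, size_L
    return encoder.fit(df).transform(df)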
def multi_type_sorted(vals):
import pandas as pd
return sorted(
        vals, key=lambda v: (type(v).__module__, type(v).__name__, pd.isnull(v))
    )
# HCA Live-Trade Conversion: Date:2020-08-13
# HCA Live-Simulation: Conversion Date: 08-13-2020
# Amended in Accordance with decision to use order: 2020-08-29
# Conversion Author: <NAME>
from zipline.api import symbol,symbols, get_open_orders, order
import numpy as np
import pandas as pd
# Start:Zipline Builtin Functions
def initialize(context):
context.asserts = symbols('SPY','ZSL', 'KOLD', 'GLD')
context.bonds = symbol('SHY')
context.universe = symbols('SPY','ZSL', 'KOLD', 'GLD', 'SHY')
    context.top_n_by_momentum = pd.Series()
import pandas as pd
import matplotlib as mpl
import numpy as np
from sklearn import metrics
import itertools
import warnings
from dateutil.relativedelta import relativedelta
from statsmodels.tsa.stattools import adfuller
from statsmodels.tsa.seasonal import seasonal_decompose
from statsmodels.tsa.statespace.sarimax import SARIMAX
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib import ticker
import matplotlib.dates as mdates
from matplotlib.dates import DateFormatter
# plt.style.use('ggplot')
sns.set_theme(style="darkgrid")
font = {'size' : 12}
mpl.rc('font', **font)
mpl.rc('figure', max_open_warning = 0)
pd.set_option('display.max_columns',None)
pd.set_option('display.max_rows',25)
# only display whole years in figures
years = mdates.YearLocator()
years_fmt = mdates.DateFormatter('%Y')
print('Functions loaded.')
################################################################################
def melt_data(df):
'''
Takes in a Zillow Housing Data File (ZHVI) as a DataFrame in wide format
and returns a melted DataFrame
'''
melted = pd.melt(df, id_vars=['RegionID', 'RegionName', 'City', 'State', 'StateName', 'Metro', 'CountyName', 'SizeRank', 'RegionType'], var_name='date')
melted['date'] = pd.to_datetime(melted['date'], infer_datetime_format=True)
melted = melted.dropna(subset=['value'])
return melted
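# Illustrative sketch (added; not from the original analysis): melt_data expects
# the wide Zillow layout with one column per month. The tiny frame below fakes
# two months for a single region just to show the reshaping to (region, date, value).
def _example_melt_data():
    fake_wide = pd.DataFrame({
        'RegionID': [1], 'RegionName': [94107], 'City': ['San Francisco'],
        'State': ['CA'], 'StateName': ['California'], 'Metro': ['San Francisco'],
        'CountyName': ['San Francisco'], 'SizeRank': [1], 'RegionType': ['Zip'],
        '2020-01-31': [1000000], '2020-02-29': [1010000],
    })
    return melt_data(fake_wide)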
def visualize_data(df, sf_all, bedrooms):
fig, ax = plt.subplots(figsize=(15,10))
ax.set_title(f'{bedrooms}-Bedroom Home Values in San Franciso by Zip Code', size=24)
sns.lineplot(data=df, x=df.date, y=df.value, ax=ax, hue='zipcode', style='zipcode')
sns.lineplot(data=sf_all, x=sf_all.index, y=sf_all.value, ax=ax, color = 'b', label='all')
ax.set_xlabel('Year', size=20)
ax.set_ylabel('Home Value (USD)', size=20)
ax.set_xlim(pd.Timestamp('1996'), pd.Timestamp('2022-05-31'))
ax.xaxis.set_major_locator(years)
ax.xaxis.set_major_formatter(years_fmt)
ax.set_yticks(np.linspace(1e5,1.5e6,15))
ax.set_ylim((1e5, 1.5e6))
ax.get_yaxis().set_major_formatter(ticker.FuncFormatter(lambda x, p: format(int(x), ',')))
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.savefig(f'images/{bedrooms}_bdrm_home_values.png')
def create_df_dict(df):
zipcodes = list(set(df.zipcode))
keys = [zipcode for zipcode in map(str,zipcodes)]
data_list = []
for key in keys:
new_df = df.copy()[df.zipcode == int(key)]
new_df.drop('zipcode', inplace=True, axis=1)
new_df.columns = ['date', 'value']
new_df.date = pd.to_datetime(new_df.date)
new_df.set_index('date', inplace=True)
new_df = new_df.asfreq('M')
data_list.append(new_df)
df_dict = dict(zip(keys, data_list))
return df_dict
def test_stationarity(df_all, diffs=0):
if diffs == 2:
dftest = adfuller(df_all.diff().diff().dropna())
elif diffs == 1:
dftest = adfuller(df_all.diff().dropna())
else:
dftest = adfuller(df_all)
dfoutput = pd.Series(dftest[0:4], index=['Test Statistic',
'p-value', '#Lags Used',
'Number of Observations Used'])
for key, value in dftest[4].items():
dfoutput['Critical Value (%s)' %key] = value
    print(dfoutput)
def test_stationarity_all_zips(df_dict, diffs=0):
for zipcode, df in df_dict.items():
if diffs == 2:
dftest = adfuller(df.diff().diff().dropna())
elif diffs == 1:
dftest = adfuller(df.diff().dropna())
else:
dftest = adfuller(df)
dfoutput = pd.Series(dftest[0:4], index=['Test Statistic',
'p-value', '#Lags Used',
'Number of Observations Used'])
for key, value in dftest[4].items():
dfoutput['Critical Value (%s)' %key] = value
print(dfoutput[1])
def plot_pacf_housing(df_all, bedrooms):
pacf_fig, ax = plt.subplots(1, 2, figsize=(12, 6))
pacf_fig.suptitle(f'Partial Autocorrelations of {bedrooms}-Bedroom Time Series for Entire San Francisco Data Set', fontsize=18)
plot_pacf(df_all, ax=ax[0])
ax[0].set_title('Undifferenced PACF', size=14)
ax[0].set_xlabel('Lags', size=14)
ax[0].set_ylabel('PACF', size=14)
plot_pacf(df_all.diff().dropna(), ax=ax[1])
ax[1].set_title('Differenced PACF', size=14)
ax[1].set_xlabel('Lags', size=14)
ax[1].set_ylabel('PACF', size=14)
pacf_fig.tight_layout()
pacf_fig.subplots_adjust(top=0.9)
plt.savefig(f'images/{bedrooms}_bdrm_PACF.png')
def plot_acf_housing(df_all, bedrooms):
acf_fig, ax = plt.subplots(1, 3, figsize=(18, 6))
acf_fig.suptitle(f'Autocorrelations of {bedrooms}-Bedroom Time Series for Entire San Francisco Data Set', fontsize=18)
plot_acf(df_all, ax=ax[0])
ax[0].set_title('Undifferenced ACF', size=14)
ax[0].set_xlabel('Lags', size=14)
ax[0].set_ylabel('ACF', size=14)
plot_acf(df_all.diff().dropna(), ax=ax[1])
ax[1].set_title('Once-Differenced ACF', size=14)
ax[1].set_xlabel('Lags', size=14)
ax[1].set_ylabel('ACF', size=14)
plot_acf(df_all.diff().diff().dropna(), ax=ax[2])
ax[2].set_title('Twice-Differenced ACF', size=14)
ax[2].set_xlabel('Lags', size=14)
ax[2].set_ylabel('ACF', size=14)
acf_fig.tight_layout()
acf_fig.subplots_adjust(top=0.9)
plt.savefig(f'images/{bedrooms}_bdrm_ACF.png')
def plot_seasonal_decomposition(df_all, bedrooms):
decomp = seasonal_decompose(df_all, period=12)
dc_obs = decomp.observed
dc_trend = decomp.trend
dc_seas = decomp.seasonal
dc_resid = decomp.resid
dc_df = pd.DataFrame({"observed": dc_obs, "trend": dc_trend,
"seasonal": dc_seas, "residual": dc_resid})
start = dc_df.iloc[:, 0].index[0]
end = dc_df.iloc[:, 0].index[-1] + relativedelta(months=+15) + relativedelta(day=31)
decomp_fig, axes = plt.subplots(4, 1, figsize=(15, 15))
for i, ax in enumerate(axes):
ax.plot(dc_df.iloc[:, i])
ax.set_xlim(start, end)
ax.xaxis.set_major_locator(years)
ax.xaxis.set_major_formatter(years_fmt)
ax.set_ylabel(dc_df.iloc[:, i].name)
if i != 2:
ax.get_yaxis().set_major_formatter(ticker.FuncFormatter(lambda x, p: format(int(x), ',')))
plt.setp(ax.xaxis.get_majorticklabels(), ha="right", rotation=45, rotation_mode="anchor")
decomp_fig.suptitle(
f'Seasonal Decomposition of {bedrooms}-Bedroom Time Series of San Francisco Home Values (Mean)', fontsize=24)
decomp_fig.tight_layout()
decomp_fig.subplots_adjust(top=0.94)
plt.savefig(f'images/{bedrooms}_bdrm_seasonal_decomp.png')
def train_test_split_housing(df_dict, split=84):
print(f'Using a {split}/{100-split} train-test split...')
cutoff = [round((split/100)*len(df)) for zipcode, df in df_dict.items()]
train_dict_list = [df_dict[i][:cutoff[count]] for count, i in enumerate(list(df_dict.keys()))]
train_dict = dict(zip(list(df_dict.keys()), train_dict_list))
test_dict_list = [df_dict[i][cutoff[count]:] for count, i in enumerate(list(df_dict.keys()))]
test_dict = dict(zip(list(df_dict.keys()), test_dict_list))
return train_dict, test_dict
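# Sketch of the expected input (added for illustration): the split helper works
# on the dict of per-zip-code DataFrames produced by create_df_dict; the toy
# dict below stands in for the real Zillow data.
def _example_train_test_split_housing():
    idx = pd.date_range('2015-01-31', periods=100, freq='M')
    toy_dict = {
        '94107': pd.DataFrame({'value': np.linspace(8e5, 1.2e6, 100)}, index=idx),
        '94110': pd.DataFrame({'value': np.linspace(7e5, 1.1e6, 100)}, index=idx),
    }
    train_dict, test_dict = train_test_split_housing(toy_dict, split=84)
    return {z: (len(train_dict[z]), len(test_dict[z])) for z in toy_dict}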
def gridsearch_SARIMAX(train_dict, seas = 12, p_min=2, p_max=2, q_min=0, q_max=0, d_min=1, d_max=1,
s_p_min=2, s_p_max=2, s_q_min=0, s_q_max=0, s_d_min=1, s_d_max=1, verbose=True):
p = range(p_min, p_max+1)
q = range(q_min, q_max+1)
d = range(d_min, d_max+1)
s_p = range(s_p_min, s_p_max+1)
s_q = range(s_q_min, s_q_max+1)
s_d = range(s_d_min, s_d_max+1)
pdq = list(itertools.product(p, d, q))
seasonal_pdq = [(x[0], x[1], x[2], seas) for x in list(itertools.product(s_p, s_d, s_q))]
if verbose:
print('Parameters for SARIMAX grid search...')
for i in pdq:
for s in seasonal_pdq:
print('SARIMAX: {} x {}'.format(i, s))
zipcodes = []
param_list = []
param_seasonal_list = []
aic_list = []
for zipcode, train in train_dict.items():
for param in pdq:
for param_seasonal in seasonal_pdq:
mod = SARIMAX(train,
order=param,
seasonal_order=param_seasonal,
enforce_stationarity=False,
enforce_invertibility=False)
                with warnings.catch_warnings():
                    warnings.filterwarnings('error')
                    try:
                        aic = mod.fit(maxiter=1000).aic
                    except Warning:
                        continue
                # append only after a successful fit so the four lists stay aligned
                zipcodes.append(zipcode[-5:])
                param_list.append(param)
                param_seasonal_list.append(param_seasonal)
                aic_list.append(aic)
if verbose:
print(param,param_seasonal)
print(f'Zip Code {zipcode} | AIC: {aic}')
else:
print('-', end='')
print('\nCompleted.')
return zipcodes, param_list, param_seasonal_list, aic_list
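# Minimal invocation sketch (added; not part of the original notebook). The grid
# is shrunk to a single (p, d, q) x seasonal combination so it runs quickly on a
# toy train_dict such as the one built in _example_train_test_split_housing.
def _example_gridsearch_SARIMAX(train_dict):
    zipcodes, params, seasonal_params, aics = gridsearch_SARIMAX(
        train_dict, seas=12,
        p_min=1, p_max=1, q_min=0, q_max=0, d_min=1, d_max=1,
        s_p_min=1, s_p_max=1, s_q_min=0, s_q_max=0, s_d_min=1, s_d_max=1,
        verbose=False,
    )
    return pd.DataFrame({'zipcode': zipcodes, 'param': params,
                         'param_seasonal': seasonal_params, 'aic': aics})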
def get_best_params(zipcodes, param_list, param_seasonal_list, aic_list, bedrooms):
# intialize list of model params
model_data = {'zipcode': zipcodes,
'param': param_list,
'param_seasonal': param_seasonal_list,
'aic': aic_list
}
# Create model params DataFrames
sarimax_details_df = pd.DataFrame(model_data)
# print(sarimax_details_df.shape)
best_params_df = sarimax_details_df.loc[sarimax_details_df.groupby('zipcode')['aic'].idxmin()]
best_params_df.set_index('zipcode', inplace=True)
print(best_params_df)
best_params_df.to_csv(f'data/{bedrooms}_bdrm_best_params.csv')
return best_params_df
def evaluate_model(train_dict, test_dict, model_best_df):
predict_dict = {}
cat_predict_dict = train_dict.copy()
for _ in range(5):
for zipcode, df in cat_predict_dict.items():
if cat_predict_dict[zipcode].index[-1] >= pd.to_datetime('2021-02-28'):
continue
sari_mod = SARIMAX(df,
order=model_best_df.loc[zipcode].param,
seasonal_order=model_best_df.loc[zipcode].param_seasonal,
enforce_stationarity=False,
enforce_invertibility=False).fit()
predict = sari_mod.forecast(steps = 12, dynamic = False)
# print((zipcode,predict.index[-1],predict[-1]))
print("-", end='')
predict_dict[zipcode] = predict
dfB = pd.DataFrame(predict_dict[zipcode])
dfB.columns = ['value']
dfA = cat_predict_dict[zipcode]
cat_predict_dict[zipcode] = pd.concat([dfA, dfB], axis=0)
print('\nCompleted.')
return cat_predict_dict
def calc_RMSE(test_dict, predictions_dict, bedrooms):
zipcodes = []
RMSE_list = []
hv = []
for zipcode, df in test_dict.items():
window = len(df)
RMSE = metrics.mean_squared_error(test_dict[zipcode], predictions_dict[zipcode].iloc[-window:], squared=False)
zipcodes.append(zipcode)
RMSE_list.append(RMSE)
# get last observed house value per zip code
for zipcode, df in test_dict.items():
hv.append(df.iloc[-1].value)
RMSE_data = {'zipcode': zipcodes,
'RMSE': RMSE_list,
'last_value': hv
}
RMSE_df = pd.DataFrame(RMSE_data)
RMSE_df = RMSE_df.sort_values('RMSE', axis=0, ascending=False)
RMSE_df['RMSE_vs_value'] = 100*RMSE_df.RMSE/RMSE_df.last_value
RMSE_df.set_index('zipcode', inplace=True)
print(RMSE_df)
RMSE_df.to_csv(f'data/{bedrooms}_bdrm_RMSE.csv')
return RMSE_df
def gridsearch_SARIMAX_test_predict(train_dict, test_dict, seas = 12, p_min=2, p_max=2, q_min=0, q_max=0, d_min=1, d_max=1,
s_p_min=2, s_p_max=2, s_q_min=0, s_q_max=0, s_d_min=1, s_d_max=1):
p = range(p_min, p_max+1)
q = range(q_min, q_max+1)
d = range(d_min, d_max+1)
s_p = range(s_p_min, s_p_max+1)
s_q = range(s_q_min, s_q_max+1)
s_d = range(s_d_min, s_d_max+1)
pdq = list(itertools.product(p, d, q))
seasonal_pdq = [(x[0], x[1], x[2], seas) for x in list(itertools.product(s_p, s_d, s_q))]
print('Parameters for SARIMAX grid search for test predictions...')
for i in pdq:
for s in seasonal_pdq:
print('SARIMAX: {} x {}'.format(i, s))
zipcodes = []
param_list = []
param_seasonal_list = []
RMSE_list = []
predict_dict = {}
cat_predict_dict = train_dict.copy()
for param in pdq:
for param_seasonal in seasonal_pdq:
predict_dict = {}
cat_predict_dict = train_dict.copy()
for count in range(5):
for zipcode, df in cat_predict_dict.items():
if cat_predict_dict[zipcode].index[-1] >= pd.to_datetime('2021-02-28'):
# print(param, param_seasonal)
window = len(test_dict[zipcode])
RMSE = metrics.mean_squared_error(test_dict[zipcode], cat_predict_dict[zipcode].iloc[-window:], squared=False)
zipcodes.append(zipcode)
param_list.append(param)
param_seasonal_list.append(param_seasonal)
RMSE_list.append(RMSE)
print("-", end='')
continue
sari_mod = SARIMAX(df,
order=param,
seasonal_order=param_seasonal,
enforce_stationarity=False,
enforce_invertibility=False).fit()
predict = sari_mod.forecast(steps = 12, dynamic = False)
# print((zipcode,predict.index[-1],predict[-1])) # debugging
predict_dict[zipcode] = predict
                    dfB = pd.DataFrame(predict_dict[zipcode])
"""
Train a model using either `flow_from_directory` or `flow_from_dataframe`.
"""
import numpy as np
import pandas as pd
from tensorflow.keras.applications.vgg16 import preprocess_input
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from sklearn.model_selection import train_test_split
from tensorflow.keras.callbacks import EarlyStopping
from ..utils.metadata import wnids
from ..utils.paths import path_repo
from ..utils.preprocessing import crop_and_pca_generator
split = 0.1
datagen_valid = ImageDataGenerator(
preprocessing_function=preprocess_input,
validation_split=split)
early_stopping = EarlyStopping(
min_delta=0.001,
patience=2,
verbose=True,
restore_best_weights=True)
params_training = dict(
epochs=300,
verbose=1,
callbacks=[early_stopping],
use_multiprocessing=False,
workers=1)
params_generator = dict(
batch_size=256,
shuffle=True,
class_mode='categorical')
def partition_shuffled(df, labels_col='class'):
df_train, df_valid = train_test_split(df, test_size=split, stratify=df[labels_col])
return pd.concat((df_valid, df_train))
def partition_ordered(df, labels_col='class'):
df_train, df_valid = pd.DataFrame(), pd.DataFrame()
for wnid in wnids:
inds = np.flatnonzero(df[labels_col]==wnid)
val_size = int(split*len(inds))
        df_train = pd.concat([df_train, df.iloc[inds[val_size:]]])
        df_valid = pd.concat([df_valid, df.iloc[inds[:val_size]]])
    return pd.concat((df_valid, df_train))
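# Usage sketch (added for illustration): both partition helpers return the same
# rows with the validation block moved to the front; the assumption here is that
# the surrounding training script relies on ImageDataGenerator's validation_split
# taking the leading fraction of the dataframe as the validation subset.
def _example_partition_shuffled():
    toy = pd.DataFrame({'filename': ['img_%d.jpg' % i for i in range(20)],
                        'class': ['n01440764'] * 10 + ['n01443537'] * 10})
    return partition_shuffled(toy, labels_col='class').head()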
import gc
import glob
import os
import time
import cv2
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.layers import Input, Lambda
from tensorflow.python.keras.models import Model, load_model
# https://github.com/tensorflow/tensorflow/issues/29161
# https://github.com/keras-team/keras/issues/10340
session = tf.compat.v1.Session(config=tf.compat.v1.ConfigProto())
K.set_session(session)
# https://www.tensorflow.org/api_docs/python/tf/compat/v1/disable_eager_execution
tf.compat.v1.disable_eager_execution()
def init_model(model_file_path):
backbone_model = load_model(model_file_path,
custom_objects={
"tf": tf,
"swish": tf.nn.swish
},
compile=False)
input_tensor = Input(shape=list(backbone_model.input_shape[1:-1]) + [1])
output_tensor = Lambda(lambda x: K.repeat_elements(x, rep=3, axis=3),
name="repeat_elements")(input_tensor)
preprocess_input_wrapper = lambda x: x / 255.0
output_tensor = Lambda(preprocess_input_wrapper,
name="preprocess_input")(output_tensor)
output_tensor_list = backbone_model(output_tensor)
model = Model(inputs=input_tensor, outputs=output_tensor_list)
return model
def process_image_content(image_content,
input_shape,
use_manual_manipulation,
intensity_threshold_percentage=0.2,
edge_threshold=5):
if use_manual_manipulation:
# Cropping
intensity_threshold = np.uint8(
np.max(image_content) * intensity_threshold_percentage)
width_mask = np.sum(
image_content[edge_threshold:-edge_threshold,
edge_threshold:-edge_threshold] > intensity_threshold,
axis=0) > 1
height_mask = np.sum(
image_content[edge_threshold:-edge_threshold,
edge_threshold:-edge_threshold] > intensity_threshold,
axis=1) > 1
width_start, width_end = np.where(width_mask)[0][[0, -1]]
width_start, width_end = max(
0, width_start - edge_threshold * 2), width_end + edge_threshold * 2
height_start, height_end = np.where(height_mask)[0][[0, -1]]
height_start, height_end = max(
0,
height_start - edge_threshold * 2), height_end + edge_threshold * 2
image_content = image_content[height_start:height_end,
width_start:width_end]
# Apply zero padding to make it square
height, width = image_content.shape
max_length = np.max(image_content.shape)
height_pad = (max_length - height) // 2
width_pad = (max_length - width) // 2
image_content = np.pad(image_content, ((height_pad,), (width_pad,)),
mode="constant",
constant_values=0)
# Resize the image
image_content = cv2.resize(image_content, input_shape[:2][::-1])
# Normalization
min_intensity, max_intensity = np.min(image_content), np.max(image_content)
image_content = ((image_content.astype(np.float32) - min_intensity) /
(max_intensity - min_intensity) * 255).astype(np.uint8)
# Add dummy dimensions
image_content = np.expand_dims(image_content, axis=-1)
image_content = np.expand_dims(image_content, axis=0)
return image_content
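# Smoke-test sketch (added; not part of the original script): run the
# preprocessing on a random grayscale image and check that the output has the
# batch/channel dimensions the model expects, i.e. (1, height, width, 1).
def _example_process_image_content():
    fake_image = np.random.randint(0, 255, size=(137, 236), dtype=np.uint8)
    processed = process_image_content(fake_image,
                                      input_shape=(64, 64, 1),
                                      use_manual_manipulation=False)
    return processed.shape  # (1, 64, 64, 1)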
def perform_inference(model_file_path_pattern="inference.h5",
use_manual_manipulation=False,
batch_size=64,
ensembling_option=np.mean):
inference_start = time.time()
# Initiation
height = 137
width = 236
attribute_name_list = [
"consonant_diacritic", "grapheme_root", "vowel_diacritic"
]
# Paths of folders
root_folder_path_list = [
os.path.expanduser("~/Documents/Local Storage/Dataset"), "/kaggle/input"
]
root_folder_path_mask = [
os.path.isdir(path) for path in root_folder_path_list
]
root_folder_path = root_folder_path_list[root_folder_path_mask.index(True)]
dataset_folder_name = "bengaliai-cv19"
dataset_folder_path = os.path.join(root_folder_path, dataset_folder_name)
# Paths of files
test_parquet_file_path_list = sorted(
glob.glob(os.path.join(dataset_folder_path,
"test_image_data_*.parquet")))
# Load models
model_list = []
model_file_path_list = sorted(glob.glob(model_file_path_pattern))
assert len(model_file_path_list) > 0
for model_file_path in model_file_path_list:
print("Loading the model from {} ...".format(model_file_path))
model = init_model(model_file_path)
model_list.append(model)
input_shape = model_list[0].input_shape[1:]
# Process the test split
concatenated_image_id_list, probability_array_dict = [], {}
process_image_content_wrapper = lambda image_content: process_image_content(
image_content, input_shape, use_manual_manipulation)
for parquet_file_path in test_parquet_file_path_list:
print("Processing {} ...".format(parquet_file_path))
        data_frame = pd.read_parquet(parquet_file_path)
import warnings
from typing import Iterable, Sequence, Union, List
import numpy as np
import pandas as pd
from datetime import datetime, timedelta
from copy import deepcopy
from mikeio.eum import EUMType, ItemInfo
from .base import TimeSeries
def _parse_axis(data_shape, axis):
axis = 0 if axis == "time" else axis
if (axis == "spatial") or (axis == "space"):
if len(data_shape) == 1:
            raise ValueError(f"axis '{axis}' not allowed for Dataset with shape {data_shape}")
axis = 1 if (len(data_shape) == 2) else tuple(range(1, len(data_shape)))
if axis is None:
axis = 0 if (len(data_shape) == 1) else tuple(range(0, len(data_shape)))
if isinstance(axis, str):
        raise ValueError(
            f"axis argument '{axis}' not supported! Must be None, int, list of int or 'time' or 'space'"
        )
return axis
def _time_by_axis(time, axis):
# time: Dataset time axis;
if axis == 0:
time = pd.DatetimeIndex([time[0]])
elif isinstance(axis, Sequence) and 0 in axis:
time = pd.DatetimeIndex([time[0]])
else:
time = time
return time
def _keepdims_by_axis(axis):
# keepdims: input to numpy aggregate function
if axis == 0:
keepdims = True
else:
keepdims = False
return keepdims
def _items_except_Z_coordinate(items):
if items[0].name == "Z coordinate":
items = deepcopy(items)
items.pop(0)
return items
def _get_repeated_items(
items_in: List[ItemInfo], prefixes: List[str]
) -> List[ItemInfo]:
new_items = []
for item_in in items_in:
for prefix in prefixes:
item = deepcopy(item_in)
item.name = f"{prefix}, {item.name}"
new_items.append(item)
return new_items
def _reshape_data_by_axis(data, orig_shape, axis):
if isinstance(axis, int):
return data
if len(orig_shape) == len(axis):
shape = (1,)
data = [d.reshape(shape) for d in data]
if len(orig_shape) - len(axis) == 1:
        # e.g. (0,2) for dfs2
shape = [1] if (0 in axis) else [orig_shape[0]]
ndims = len(orig_shape)
for j in range(1, ndims):
if j not in axis:
shape.append(orig_shape[j])
data = [d.reshape(shape) for d in data]
return data
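# Behaviour sketch of the axis helper (added for illustration): "time" always
# maps to axis 0, "space"/"spatial" to the remaining axes, and None to all axes.
def _example_parse_axis():
    data_shape = (10, 5, 3)  # e.g. (time, y, x)
    return (
        _parse_axis(data_shape, "time"),   # -> 0
        _parse_axis(data_shape, "space"),  # -> (1, 2)
        _parse_axis(data_shape, None),     # -> (0, 1, 2)
    )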
class Dataset(TimeSeries):
deletevalue = 1.0e-35
"""Dataset
Attributes
----------
data: list[np.array]
Data, potentially multivariate and multiple spatial dimensions
time: list[datetime]
Datetime of each timestep
items: list[ItemInfo]
Names, type and unit of each item in the data list
Notes
-----
Data from a specific item can be accessed using the name of the item
similar to a dictionary.
Attributes data, time, names can also be unpacked like a tuple
Examples
--------
>>> ds = mikeio.read("tests/testdata/random.dfs0")
>>> ds
<mikeio.Dataset>
Dimensions: (1000,)
Time: 2017-01-01 00:00:00 - 2017-07-28 03:00:00
Items:
0: VarFun01 <Water Level> (meter)
1: NotFun <Water Level> (meter)
>>> ds['NotFun'][0:5]
array([0.64048636, 0.65325695, nan, 0.21420799, 0.99915695])
>>> ds = mikeio.read("tests/testdata/HD2D.dfsu")
<mikeio.Dataset>
Dimensions: (9, 884)
Time: 1985-08-06 07:00:00 - 1985-08-07 03:00:00
Items:
0: Surface elevation <Surface Elevation> (meter)
1: U velocity <u velocity component> (meter per sec)
2: V velocity <v velocity component> (meter per sec)
3: Current speed <Current Speed> (meter per sec)
>>> ds2 = ds[['Surface elevation','Current speed']] # item selection by name
>>> ds2
<mikeio.Dataset>
Dimensions: (9, 884)
Time: 1985-08-06 07:00:00 - 1985-08-07 03:00:00
Items:
0: Surface elevation <Surface Elevation> (meter)
1: Current speed <Current Speed> (meter per sec)
>>> ds3 = ds2.isel([0,1,2], axis=0) # temporal selection
>>> ds3
<mikeio.Dataset>
Dimensions: (3, 884)
Time: 1985-08-06 07:00:00 - 1985-08-06 12:00:00
Items:
0: Surface elevation <Surface Elevation> (meter)
1: Current speed <Current Speed> (meter per sec)
>>> ds4 = ds3.isel([100,200], axis=1) # element selection
>>> ds4
<mikeio.Dataset>
Dimensions: (3, 2)
Time: 1985-08-06 07:00:00 - 1985-08-06 12:00:00
Items:
0: Surface elevation <Surface Elevation> (meter)
1: Current speed <Current Speed> (meter per sec)
>>> ds5 = ds[[1,0]] # item selection by position
>>> ds5
<mikeio.Dataset>
Dimensions: (1000,)
Time: 2017-01-01 00:00:00 - 2017-07-28 03:00:00
Items:
0: NotFun <Water Level> (meter)
1: VarFun01 <Water Level> (meter)
"""
def __init__(
self,
data: Union[List[np.ndarray], float],
time: Union[pd.DatetimeIndex, str],
items: Union[List[ItemInfo], List[EUMType], List[str]] = None,
):
item_infos: List[ItemInfo] = []
self._deletevalue = Dataset.deletevalue
if isinstance(time, str):
# default single-step time
time = pd.date_range(time, periods=1)
if np.isscalar(data) and isinstance(items, Sequence):
# create empty dataset
n_elements = data
n_items = len(items)
n_timesteps = len(time)
data = self.create_empty_data(
n_items=n_items, n_timesteps=n_timesteps, n_elements=n_elements
)
elif isinstance(data, Sequence):
n_items = len(data)
n_timesteps = data[0].shape[0]
else:
raise TypeError(
f"data type '{type(data)}' not supported! data must be a list of numpy arrays"
)
if items is None:
# default Undefined items
for j in range(n_items):
item_infos.append(ItemInfo(f"Item {j+1}"))
else:
for item in items:
if isinstance(item, EUMType) or isinstance(item, str):
item_infos.append(ItemInfo(item))
elif isinstance(item, ItemInfo):
item_infos.append(item)
else:
raise ValueError(f"items of type: {type(item)} is not supported")
if len(items) != n_items:
raise ValueError(
f"Number of items in iteminfo {len(items)} doesn't match the data {n_items}."
)
if len(time) != n_timesteps:
raise ValueError(
f"Number of timesteps in time {len(time)} doesn't match the data {n_timesteps}."
)
self.data = data
self.time = pd.DatetimeIndex(time)
self._items = item_infos
def __repr__(self):
out = ["<mikeio.Dataset>"]
out.append(f"Dimensions: {self.shape}")
out.append(f"Time: {self.time[0]} - {self.time[-1]}")
if not self.is_equidistant:
out.append("-- Non-equidistant calendar axis --")
if self.n_items > 10:
out.append(f"Number of items: {self.n_items}")
else:
out.append("Items:")
for i, item in enumerate(self.items):
out.append(f" {i}: {item}")
return str.join("\n", out)
def __len__(self):
return len(self.items)
def __setitem__(self, key, value):
if isinstance(key, int):
self.data[key] = value
elif isinstance(key, str):
item_lookup = {item.name: i for i, item in enumerate(self.items)}
key = item_lookup[key]
self.data[key] = value
else:
raise ValueError(f"indexing with a {type(key)} is not (yet) supported")
def __getitem__(self, key):
if isinstance(key, slice):
s = self.time.slice_indexer(key.start, key.stop)
time_steps = list(range(s.start, s.stop))
return self.isel(time_steps, axis=0)
if isinstance(key, int):
return self.data[key]
if isinstance(key, str):
item_lookup = {item.name: i for i, item in enumerate(self.items)}
key = item_lookup[key]
return self.data[key]
if isinstance(key, ItemInfo):
return self.__getitem__(key.name)
if isinstance(key, list):
data = []
items = []
item_lookup = {item.name: i for i, item in enumerate(self.items)}
for v in key:
data_item = self.__getitem__(v)
if isinstance(v, str):
i = item_lookup[v]
if isinstance(v, int):
i = v
item = self.items[i]
items.append(item)
data.append(data_item)
return Dataset(data, self.time, items)
raise ValueError(f"indexing with a {type(key)} is not (yet) supported")
def __radd__(self, other):
return self.__add__(other)
def __add__(self, other):
if isinstance(other, self.__class__):
return self._add_dataset(other)
else:
return self._add_value(other)
def __rsub__(self, other):
ds = self.__mul__(-1.0)
return other + ds
def __sub__(self, other):
if isinstance(other, self.__class__):
return self._add_dataset(other, sign=-1.0)
else:
return self._add_value(-other)
def __rmul__(self, other):
return self.__mul__(other)
def __mul__(self, other):
if isinstance(other, self.__class__):
            raise NotImplementedError("Multiplication is not implemented for two Datasets")
else:
return self._multiply_value(other)
def _add_dataset(self, other, sign=1.0):
self._check_datasets_match(other)
try:
data = [self[x] + sign * other[y] for x, y in zip(self.items, other.items)]
except:
raise ValueError("Could not add data in Dataset")
time = self.time.copy()
items = deepcopy(self.items)
return Dataset(data, time, items)
def _check_datasets_match(self, other):
if self.n_items != other.n_items:
raise ValueError(
f"Number of items must match ({self.n_items} and {other.n_items})"
)
for j in range(self.n_items):
if self.items[j].type != other.items[j].type:
raise ValueError(
f"Item types must match. Item {j}: {self.items[j].type} != {other.items[j].type}"
)
if self.items[j].unit != other.items[j].unit:
raise ValueError(
f"Item units must match. Item {j}: {self.items[j].unit} != {other.items[j].unit}"
)
if not np.all(self.time == other.time):
raise ValueError("All timesteps must match")
if self.shape != other.shape:
raise ValueError("shape must match")
def _add_value(self, value):
try:
data = [value + self[x] for x in self.items]
except:
raise ValueError(f"{value} could not be added to Dataset")
items = deepcopy(self.items)
time = self.time.copy()
return Dataset(data, time, items)
def _multiply_value(self, value):
try:
data = [value * self[x] for x in self.items]
except:
raise ValueError(f"{value} could not be multiplied to Dataset")
items = deepcopy(self.items)
time = self.time.copy()
return Dataset(data, time, items)
def describe(self, **kwargs):
"""Generate descriptive statistics by wrapping pandas describe()"""
all_df = [
pd.DataFrame(self.data[j].flatten(), columns=[self.items[j].name]).describe(
**kwargs
)
for j in range(self.n_items)
]
return pd.concat(all_df, axis=1)
def copy(self):
"""Returns a copy of this dataset."""
items = deepcopy(self.items)
data = [self[x].copy() for x in self.items]
time = self.time.copy()
return Dataset(data, time, items)
def to_numpy(self):
"""Stack data to a single ndarray with shape (n_items, n_timesteps, ...)
Returns
-------
np.ndarray
"""
return np.stack(self.data)
@classmethod
def combine(cls, *datasets):
"""Combine n Datasets either along items or time axis
Parameters
----------
*datasets: datasets to combine
Returns
-------
Dataset
a combined dataset
Examples
--------
>>> import mikeio
>>> from mikeio import Dataset
>>> ds1 = mikeio.read("HD2D.dfsu", items=0)
>>> ds1
<mikeio.Dataset>
Dimensions: (9, 884)
Time: 1985-08-06 07:00:00 - 1985-08-07 03:00:00
Items:
0: Surface elevation <Surface Elevation> (meter)
>>> ds2 = mikeio.read("HD2D.dfsu", items=[2,3])
>>> ds2
<mikeio.Dataset>
Dimensions: (9, 884)
Time: 1985-08-06 07:00:00 - 1985-08-07 03:00:00
Items:
0: V velocity <v velocity component> (meter per sec)
1: Current speed <Current Speed> (meter per sec)
>>> ds3 = Dataset.combine(ds1,ds2)
>>> ds3
<mikeio.Dataset>
Dimensions: (9, 884)
Time: 1985-08-06 07:00:00 - 1985-08-07 03:00:00
Items:
0: Surface elevation <Surface Elevation> (meter)
1: V velocity <v velocity component> (meter per sec)
2: Current speed <Current Speed> (meter per sec)
"""
if isinstance(datasets[0], Iterable):
if isinstance(datasets[0][0], Dataset):
datasets = datasets[0]
ds = datasets[0].copy()
for dsj in datasets[1:]:
ds = ds._combine(dsj, copy=False)
return ds
def _combine(self, other, copy=True):
try:
ds = self._concat_time(other, copy=copy)
except ValueError:
ds = self._append_items(other, copy=copy)
return ds
def append_items(self, other, inplace=False):
"""Append items from other Dataset to this Dataset"""
if inplace:
self._append_items(other, copy=False)
else:
return self._append_items(other, copy=True)
def _append_items(self, other, copy=True):
if not np.all(self.time == other.time):
# if not: create common time?
raise ValueError("All timesteps must match")
ds = self.copy() if copy else self
for j in range(other.n_items):
ds.items.append(other.items[j])
ds.data.append(other.data[j])
return ds
def concat(self, other, inplace=False):
"""Concatenate this Dataset with data from other Dataset
Parameters
---------
other: Dataset
Other dataset to concatenate with
inplace: bool, optional
Default is to return a new dataset
Returns
-------
Dataset
concatenated dataset
Examples
--------
>>> import mikeio
>>> ds1 = mikeio.read("HD2D.dfsu", time_steps=[0,1])
>>> ds2 = mikeio.read("HD2D.dfsu", time_steps=[2,3])
>>> ds1.n_timesteps
2
>>> ds3 = ds1.concat(ds2)
>>> ds3.n_timesteps
4
"""
if inplace:
ds = self._concat_time(other, copy=False)
self.data = ds.data
self.time = ds.time
else:
return self._concat_time(other, copy=True)
def _concat_time(self, other, copy=True):
self._check_all_items_match(other)
if not np.all(self.shape == other.shape):
raise ValueError("Shape of the datasets must match")
ds = self.copy() if copy else self
s1 = pd.Series(np.arange(len(ds.time)), index=ds.time, name="idx1")
s2 = pd.Series(np.arange(len(other.time)), index=other.time, name="idx2")
df12 = pd.concat([s1, s2], axis=1)
newtime = df12.index
newdata = self.create_empty_data(
n_items=ds.n_items, n_timesteps=len(newtime), shape=ds.shape[1:]
)
for j in range(ds.n_items):
idx1 = np.where(~df12["idx1"].isna())
newdata[j][idx1, :] = ds.data[j]
# if there is an overlap "other" data will be used!
idx2 = np.where(~df12["idx2"].isna())
newdata[j][idx2, :] = other.data[j]
return Dataset(newdata, newtime, ds.items)
def _check_all_items_match(self, other):
if self.n_items != other.n_items:
raise ValueError(
f"Number of items must match ({self.n_items} and {other.n_items})"
)
for j in range(self.n_items):
if self.items[j].name != other.items[j].name:
raise ValueError(
f"Item names must match. Item {j}: {self.items[j].name} != {other.items[j].name}"
)
if self.items[j].type != other.items[j].type:
raise ValueError(
f"Item types must match. Item {j}: {self.items[j].type} != {other.items[j].type}"
)
if self.items[j].unit != other.items[j].unit:
raise ValueError(
f"Item units must match. Item {j}: {self.items[j].unit} != {other.items[j].unit}"
)
def dropna(self):
"""Remove time steps where all items are NaN"""
# TODO consider all items
x = self[0]
# this seems overly complicated...
axes = tuple(range(1, x.ndim))
idx = np.where(~np.isnan(x).all(axis=axes))
idx = list(idx[0])
return self.isel(idx, axis=0)
def flipud(self):
"""Flip dataset updside down"""
self.data = [np.flip(self[x], axis=1) for x in self.items]
return self
def isel(self, idx, axis=1):
"""
Select subset along an axis.
Parameters
----------
idx: int, scalar or array_like
axis: (int, str, None), optional
axis number or "time", by default 1
Returns
-------
Dataset
dataset with subset
Examples
--------
>>> ds = mikeio.read("tests/testdata/HD2D.dfsu")
>>> ds2 = ds.isel([0,1,2], axis=0) # temporal selection
>>> ds2
DataSet(data, time, items)
Number of items: 2
Shape: (3, 884)
1985-08-06 07:00:00 - 1985-08-06 12:00:00
>>> ds3 = ds2.isel([100,200], axis=1) # element selection
>>> ds3
DataSet(data, time, items)
Number of items: 2
Shape: (3, 2)
1985-08-06 07:00:00 - 1985-08-06 12:00:00
"""
axis = _parse_axis(self.shape, axis)
if axis == 0:
time = self.time[idx]
items = self.items
else:
time = self.time
items = _items_except_Z_coordinate(self.items)
res = []
for item in items:
x = np.take(self[item.name], idx, axis=axis)
res.append(x)
ds = Dataset(res, time, items)
return ds
def aggregate(self, axis="time", func=np.nanmean, **kwargs):
"""Aggregate along an axis
Parameters
----------
axis: (int, str, None), optional
axis number or "time" or "space", by default "time"=0
func: function, optional
default np.nanmean
Returns
-------
Dataset
dataset with aggregated values
"""
items = _items_except_Z_coordinate(self.items)
axis = _parse_axis(self.shape, axis)
time = _time_by_axis(self.time, axis)
keepdims = _keepdims_by_axis(axis)
res = [
func(self[item.name], axis=axis, keepdims=keepdims, **kwargs)
for item in items
]
res = _reshape_data_by_axis(res, self.shape, axis)
return Dataset(res, time, items)
def quantile(self, q, *, axis="time", **kwargs):
"""Compute the q-th quantile of the data along the specified axis.
Wrapping np.quantile
Parameters
----------
q: array_like of float
Quantile or sequence of quantiles to compute,
which must be between 0 and 1 inclusive.
axis: (int, str, None), optional
axis number or "time" or "space", by default "time"=0
Returns
-------
Dataset
dataset with quantile values
Examples
--------
>>> ds.quantile(q=[0.25,0.75])
>>> ds.quantile(q=0.5)
>>> ds.quantile(q=[0.01,0.5,0.99], axis="space")
See Also
--------
nanquantile : quantile with NaN values ignored
"""
return self._quantile(q, axis=axis, func=np.quantile, **kwargs)
def nanquantile(self, q, *, axis="time", **kwargs):
"""Compute the q-th quantile of the data along the specified axis, while ignoring nan values.
Wrapping np.nanquantile
Parameters
----------
q: array_like of float
Quantile or sequence of quantiles to compute,
which must be between 0 and 1 inclusive.
axis: (int, str, None), optional
axis number or "time" or "space", by default "time"=0
Examples
--------
>>> ds.nanquantile(q=[0.25,0.75])
>>> ds.nanquantile(q=0.5)
>>> ds.nanquantile(q=[0.01,0.5,0.99], axis="space")
Returns
-------
Dataset
dataset with quantile values
"""
return self._quantile(q, axis=axis, func=np.nanquantile, **kwargs)
def _quantile(self, q, *, axis=0, func=np.quantile, **kwargs):
items_in = _items_except_Z_coordinate(self.items)
axis = _parse_axis(self.shape, axis)
time = _time_by_axis(self.time, axis)
keepdims = _keepdims_by_axis(axis)
qvec = [q] if np.isscalar(q) else q
qtxt = [f"Quantile {q}" for q in qvec]
itemsq = _get_repeated_items(items_in, qtxt)
res = []
for item in items_in:
qdat = func(self[item.name], q=q, axis=axis, keepdims=keepdims, **kwargs)
for j in range(len(qvec)):
qdat_item = qdat[j, ...] if len(qvec) > 1 else qdat
res.append(qdat_item)
res = _reshape_data_by_axis(res, self.shape, axis)
return Dataset(res, time, itemsq)
def max(self, axis="time"):
"""Max value along an axis
Parameters
----------
axis: (int, str, None), optional
axis number or "time" or "space", by default "time"=0
Returns
-------
Dataset
dataset with max value
See Also
--------
nanmax : Max values with NaN values removed
"""
return self.aggregate(axis=axis, func=np.max)
def min(self, axis="time"):
"""Min value along an axis
Parameters
----------
axis: (int, str, None), optional
axis number or "time" or "space", by default "time"=0
Returns
-------
Dataset
dataset with max value
See Also
--------
nanmin : Min values with NaN values removed
"""
return self.aggregate(axis=axis, func=np.min)
def mean(self, axis="time"):
"""Mean value along an axis
Parameters
----------
axis: (int, str, None), optional
axis number or "time" or "space", by default "time"=0
Returns
-------
Dataset
dataset with mean value
See Also
--------
nanmean : Mean values with NaN values removed
average: Weighted average
"""
return self.aggregate(axis=axis, func=np.mean)
def average(self, weights, axis="time"):
"""
Compute the weighted average along the specified axis.
Parameters
----------
axis: (int, str, None), optional
axis number or "time" or "space", by default "time"=0
Returns
-------
Dataset
dataset with weighted average value
See Also
--------
nanmean : Mean values with NaN values removed
        aggregate : Aggregate along an axis using a custom function
Examples
--------
>>> dfs = Dfsu("HD2D.dfsu")
>>> ds = dfs.read(["Current speed"])
>>> area = dfs.get_element_area()
>>> ds2 = ds.average(axis="space", weights=area)
"""
def func(x, axis, keepdims):
if keepdims:
raise NotImplementedError()
return np.average(x, weights=weights, axis=axis)
return self.aggregate(axis=axis, func=func)
def nanmax(self, axis="time"):
"""Max value along an axis (NaN removed)
Parameters
----------
axis: (int, str, None), optional
axis number or "time" or "space", by default "time"=0
Returns
-------
Dataset
dataset with max value
"""
return self.aggregate(axis=axis, func=np.nanmax)
def nanmin(self, axis="time"):
"""Min value along an axis (NaN removed)
Parameters
----------
axis: (int, str, None), optional
axis number or "time" or "space", by default "time"=0
Returns
-------
Dataset
dataset with max value
"""
return self.aggregate(axis=axis, func=np.nanmin)
def nanmean(self, axis="time"):
"""Mean value along an axis (NaN removed)
Parameters
----------
axis: (int, str, None), optional
axis number or "time" or "space", by default "time"=0
Returns
-------
Dataset
dataset with mean value
"""
return self.aggregate(axis=axis, func=np.nanmean)
def head(self, n=5):
"""Return the first n timesteps"""
nt = len(self.time)
n = min(n, nt)
time_steps = range(n)
return self.isel(time_steps, axis=0)
def tail(self, n=5):
"""Return the last n timesteps"""
nt = len(self.time)
start = max(0, nt - n)
time_steps = range(start, nt)
return self.isel(time_steps, axis=0)
def thin(self, step):
"""Return every n:th timesteps"""
nt = len(self.time)
time_steps = range(0, nt, step)
return self.isel(time_steps, axis=0)
def squeeze(self):
"""
Remove axes of length 1
Returns
-------
Dataset
"""
items = self.items
if items[0].name == "Z coordinate":
items = deepcopy(items)
items.pop(0)
time = self.time
res = [np.squeeze(self[item.name]) for item in items]
ds = Dataset(res, time, items)
return ds
def interp_time(
self,
dt: Union[float, pd.DatetimeIndex, "Dataset"],
method="linear",
extrapolate=True,
fill_value=np.nan,
):
"""Temporal interpolation
Wrapper of `scipy.interpolate.interp`
Parameters
----------
dt: float or pd.DatetimeIndex or Dataset
output timestep in seconds
method: str or int, optional
Specifies the kind of interpolation as a string (‘linear’, ‘nearest’, ‘zero’, ‘slinear’, ‘quadratic’, ‘cubic’, ‘previous’, ‘next’, where ‘zero’, ‘slinear’, ‘quadratic’ and ‘cubic’ refer to a spline interpolation of zeroth, first, second or third order; ‘previous’ and ‘next’ simply return the previous or next value of the point) or as an integer specifying the order of the spline interpolator to use. Default is ‘linear’.
extrapolate: bool, optional
Default True. If False, a ValueError is raised any time interpolation is attempted on a value outside of the range of x (where extrapolation is necessary). If True, out of bounds values are assigned fill_value
fill_value: float or array-like, optional
Default NaN. this value will be used to fill in for points outside of the time range.
Returns
-------
Dataset
Examples
--------
>>> ds = mikeio.read("tests/testdata/HD2D.dfsu")
>>> ds
<mikeio.Dataset>
Dimensions: (9, 884)
Time: 1985-08-06 07:00:00 - 1985-08-07 03:00:00
Items:
0: Surface elevation <Surface Elevation> (meter)
1: U velocity <u velocity component> (meter per sec)
2: V velocity <v velocity component> (meter per sec)
3: Current speed <Current Speed> (meter per sec)
>>> dsi = ds.interp_time(dt=1800)
>>> dsi
<mikeio.Dataset>
Dimensions: (41, 884)
Time: 1985-08-06 07:00:00 - 1985-08-07 03:00:00
Items:
0: Surface elevation <Surface Elevation> (meter)
1: U velocity <u velocity component> (meter per sec)
2: V velocity <v velocity component> (meter per sec)
3: Current speed <Current Speed> (meter per sec)
"""
if isinstance(dt, pd.DatetimeIndex):
t_out_index = dt
elif isinstance(dt, Dataset):
t_out_index = dt.time
else:
offset = pd.tseries.offsets.DateOffset(seconds=dt)
t_out_index = pd.date_range(
start=self.time[0], end=self.time[-1], freq=offset
)
t_in = self.time.values.astype(float)
t_out = t_out_index.values.astype(float)
data = [
self._interpolate_item(t_in, t_out, item, method, extrapolate, fill_value)
for item in self
]
return Dataset(data, t_out_index, self.items.copy())
@staticmethod
def _interpolate_item(intime, outtime, dataitem, method, extrapolate, fill_value):
from scipy.interpolate import interp1d
interpolator = interp1d(
intime,
dataitem,
axis=0,
kind=method,
bounds_error=not extrapolate,
fill_value=fill_value,
)
return interpolator(outtime)
def to_dataframe(self, unit_in_name=False, round_time="ms"):
"""Convert Dataset to a Pandas DataFrame
Parameters
----------
unit_in_name: bool, optional
include unit in column name, default False
Returns
-------
pd.DataFrame
"""
if len(self.data[0].shape) != 1:
self = self.squeeze()
if len(self.data[0].shape) != 1:
raise ValueError(
"Only data with a single dimension can be converted to a dataframe. Hint: use `isel` to create a subset."
)
if unit_in_name:
names = [f"{item.name} ({item.unit.name})" for item in self.items]
else:
names = [item.name for item in self.items]
data = np.asarray(self.data).T
df = pd.DataFrame(data, columns=names)
if round_time:
            rounded_idx = pd.DatetimeIndex(self.time).round(round_time)
            df.index = rounded_idx
        else:
            df.index = pd.DatetimeIndex(self.time)
        return df
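# End-to-end sketch (added for illustration; not part of mikeio itself): build a
# small two-item Dataset from plain numpy arrays, reduce it over space, and pull
# the result into pandas.
def _example_dataset_roundtrip():
    time = pd.date_range("2020-01-01", periods=4, freq="H")
    data = [np.random.random((4, 3)) for _ in range(2)]
    ds = Dataset(data, time, items=["Item A", "Item B"])
    mean_over_space = ds.mean(axis="space")   # one value per timestep per item
    df = mean_over_space.to_dataframe()       # 4 rows x 2 item columns
    return df.shape, ds.to_numpy().shape      # (4, 2) and (2, 4, 3)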
from copy import deepcopy
import tempfile
import numpy as np
import pandas as pd
import pytest
from PIL import Image
from Bio import SeqIO
from Bio.Align import MultipleSeqAlignment
from seqlike import SeqLike
from seqlike.codon_tables import human_codon_table, human_codon_map, codon_table_to_codon_map
from . import test_path
# TODO: Turn this into a pytest fixture using Hypothesis.
# We might need to refactor out the fixtures a bit.
nt_seqs = [SeqLike(s, "nt") for s in SeqIO.parse(test_path / f"abs_nt_4.fasta", "fasta")]
s = SeqLike(SeqIO.read(test_path / f"test.fa", "fasta"), seq_type="dna")
s_aa = SeqLike(SeqIO.read(test_path / f"test.fa", "fasta"), seq_type="dna").aa()
s_aa_with_codon_map = SeqLike(
SeqIO.read(test_path / f"test.fa", "fasta"),
codon_map=human_codon_map,
seq_type="dna",
).aa()
seqs = [deepcopy(s)] * 10
seqs_aa = [deepcopy(s_aa)] * 10
seqs_aa_with_codon_map = [deepcopy(s_aa_with_codon_map)] * 10
seqs_mixed = deepcopy(seqs) + deepcopy(seqs_aa)
# ---- test list of seqs of various types ---------
seqs_list = [
(seqs, "nt"),
(seqs_aa, "aa"),
(seqs_aa_with_codon_map, "aa"),
pytest.param(
seqs_mixed,
None,
marks=pytest.mark.xfail(reason="Not a homogeneous list of SeqLikes."),
),
]
@pytest.mark.parametrize("seqs, _type", seqs_list)
def test_init_and__type(seqs, _type):
# TODO: Docstring needed for test intent.
df = pd.DataFrame({"seqs": seqs})
assert isinstance(df, pd.DataFrame)
assert df.seqs.seq._type == _type.upper()
@pytest.mark.parametrize("seqs, _type", seqs_list)
def test_write(seqs, _type):
# TODO: Docstring needed for test intent.
df = pd.DataFrame({"seqs": seqs})
# r+ is read-writable
with tempfile.NamedTemporaryFile(mode="r+") as tempf:
df["seqs"].seq.write(tempf, "fasta")
# rewind file after writing
tempf.seek(0)
read_seqs = pd.Series(SeqLike(s, seq_type=_type) for s in SeqIO.parse(tempf, "fasta"))
for seq1, seq2 in zip(read_seqs, df["seqs"]):
assert str(seq1) == str(seq2)
@pytest.mark.parametrize("seqs, _type", seqs_list)
def test_plot(seqs, _type):
# TODO: Docstring needed for test intent.
df = pd.DataFrame({"seqs": seqs})
assert isinstance(df.seqs.seq.plot(use_bokeh=False), Image.Image)
@pytest.mark.parametrize("seqs, _type", seqs_list)
def test_align(seqs, _type):
# TODO: Docstring needed for test intent.
df = pd.DataFrame({"seqs": seqs})
assert isinstance(df.seqs.seq.align(), pd.Series)
@pytest.mark.parametrize("seqs, _type", seqs_list)
def test_as_alignment(seqs, _type):
# TODO: Docstring needed for test intent.
df = pd.DataFrame({"seqs": seqs})
assert isinstance(df.seqs.seq.as_alignment(), MultipleSeqAlignment)
@pytest.mark.parametrize("seqs, _type", seqs_list)
def test_as_counts(seqs, _type):
# TODO: Docstring needed for test intent.
df = pd.DataFrame({"seqs": seqs})
as_counts = df.seqs.seq.as_counts()
assert isinstance(as_counts, np.ndarray)
assert as_counts.shape == (max(len(s) for s in seqs), len(seqs[0].alphabet))
@pytest.mark.parametrize("seqs, _type", seqs_list)
def test_extend_ambiguous_counts(seqs, _type):
# TODO: Docstring needed for test intent.
df = pd.DataFrame({"seqs": seqs})
extended_counts = df.seqs.seq._extend_ambiguous_counts()
assert isinstance(extended_counts, np.ndarray)
@pytest.mark.parametrize("seqs, _type", seqs_list)
def test_consensus(seqs, _type):
# TODO: Docstring needed for test intent.
df = pd.DataFrame({"seqs": seqs})
consensus = df.seqs.seq.consensus()
assert isinstance(consensus, SeqLike)
assert len(consensus) == max(len(s) for s in seqs)
@pytest.mark.parametrize("seqs, _type", seqs_list)
def test_degenerate(seqs, _type):
# TODO: Docstring needed for test intent.
df = pd.DataFrame({"seqs": seqs})
degenerate = df.seqs.seq.degenerate()
assert isinstance(degenerate, SeqLike)
assert len(degenerate) == max(len(s) for s in seqs)
assert set(degenerate).issubset(set(df.seqs.seq.alphabet))
def test_consensus2():
# TODO: Docstring needed for test intent.
df = pd.DataFrame({"seqs": nt_seqs})
consensus = df.seqs.seq.consensus()
assert (
str(consensus)
== "TCAATTGGGGGAGGAGCTCTGGTGGAGGCGGTAGCGGAGGCGGAGGGTCGGCTAGCCAAGTCCAATTGGTTGAATCTGGTGGTGGTGTTGTTCAACCAGGTGGTTCTTTGAGATTGTCTT"
)
def test_degenerate2():
# TODO: Docstring needed for test intent.
df = pd.DataFrame({"seqs": nt_seqs})
degenerate = df.seqs.seq.degenerate()
assert (
str(degenerate)
== "TCAATTGGGGGAGGAGCTCTSGTGGWGGCVGTAGCGGAGKCGGAGGKTCSGCWAGCCAAGTCCAATTGGTTGAATCTGGTGGTGGTGTTGTTCAACCAGGTGGTTCTTTGAGATTGTCTT"
)
@pytest.mark.parametrize("seqs, _type", seqs_list)
def test_max_length(seqs, _type):
# TODO: Docstring needed for test intent.
df = pd.DataFrame({"seqs": seqs})
assert df.seqs.seq.max_length() == max(len(x) for x in seqs)
@pytest.mark.parametrize("seqs, _type", seqs_list)
def test_get_seq_by_id(seqs, _type):
# TODO: Docstring needed for test intent.
df = pd.DataFrame({"seqs": seqs})
assert df.seqs.seq.get_seq_by_id(seqs[0].id) == seqs[0]
@pytest.mark.parametrize("seqs, _type", seqs_list)
def test__getitem__(seqs, _type):
# TODO: Docstring needed for test intent.
    df = pd.DataFrame({"seqs": seqs})
import pandas as pd
import numpy as np
import git
import os
import sys
import bokeh.io
import bokeh.application
import bokeh.application.handlers
import bokeh.models
import bokeh.plotting as bkp
from bokeh.models import Span
import holoviews as hv
from pathlib import Path
# from bokeh.io import export_png
#-- Setup paths
# Get parent directory using git
repo = git.Repo("./", search_parent_directories=True)
homedir = repo.working_dir
# Change working directory to parent directory
os.chdir(homedir)
# Add 'Dan' directory to the search path for imports
sys.path.append('Dan')
# Import our custom cube managing functions
import cube_formatter as cf
#-- Setup bokeh
bokeh.io.output_notebook()
hv.extension('bokeh')
#-- Control parameters
# Top N counties to plot with the most deaths
# Set to -1 to plot all
plotN = 20
shift = 20
# Data Manipulation flags (should match those used in creating submission file)
isAllocCounties = True          # Flag to distribute state deaths amongst counties
isComputeDaily = False          # Flag to translate cumulative data to daily counts
#- Plot-type control flags
isStateWide = False # Flag to plot state-wise data (will use nyt_states file for true_df)
# The raw cube won't be affected so make sure it is also state-wise data
# AND cumulative since there is only cumulative nyt_us_states data
isCumul = True # Flag to denote that the plot should be cumulative, not daily deaths
# ** Only affects county-level data since state-wide is implicitly cumulative
# This sets which county-wide nyt file is used and sets the plot y-axis label
# Key days (should match those used in creating the cube)
global_dayzero = pd.to_datetime('2020 Jan 21')
"""Visualize financial instruments."""
import math
import matplotlib.pyplot as plt
import mplfinance as mpf
import numpy as np
import pandas as pd
import seaborn as sns
from .utils import validate_df
class Visualizer:
"""Base visualizer class not intended for direct use."""
@validate_df(columns={'open', 'high', 'low', 'close'})
def __init__(self, df):
"""Visualizer has a `pandas.DataFrame` object as an attribute."""
self.data = df
@staticmethod
def add_reference_line(ax, x=None, y=None, **kwargs):
"""
Static method for adding reference lines to plots.
Parameters:
- ax: The matplotlib `Axes` object to add the reference line to.
- x, y: The x, y value to draw the line at as a
single value or numpy array-like structure.
- For horizontal: pass only `y`
- For vertical: pass only `x`
- For AB line: pass both `x` and `y`
for all coordinates on the line
- kwargs: Additional keyword arguments to pass to the plotting
function.
Returns:
The matplotlib `Axes` object passed in.
"""
try:
# in case numpy array-like structures are passed -> AB line
if x.shape and y.shape:
ax.plot(x, y, **kwargs)
except:
# error triggers if at least one isn't a numpy array-like structure
try:
if not x and not y:
raise ValueError(
'You must provide an `x` or a `y` at a minimum.'
)
elif x and not y:
# vertical line
ax.axvline(x, **kwargs)
elif not x and y:
# horizontal line
ax.axhline(y, **kwargs)
except:
raise ValueError(
'If providing only `x` or `y`, it must be a single value.'
)
ax.legend()
return ax
@staticmethod
def shade_region(ax, x=tuple(), y=tuple(), **kwargs):
"""
Static method for shading a region on a plot.
Parameters:
- ax: The matplotlib `Axes` object to add the shaded region to.
- x: Tuple with the `xmin` and `xmax` bounds for the rectangle
drawn vertically.
- y: Tuple with the `ymin` and `ymax` bounds for the rectangle
drawn horizontally.
- kwargs: Additional keyword arguments to pass to the plotting
function.
Returns:
The matplotlib `Axes` object passed in.
"""
if not x and not y:
raise ValueError(
'You must provide an x or a y min/max tuple at a minimum.'
)
elif x and y:
raise ValueError('You can only provide `x` or `y`.')
elif x and not y:
# vertical span
ax.axvspan(*x, **kwargs)
elif not x and y:
# horizontal span
ax.axhspan(*y, **kwargs)
return ax
@staticmethod
def _iter_handler(items):
"""
Static method for making a list out of an item if it isn't a list or
tuple already.
Parameters:
- items: The variable to make sure it is a list.
Returns:
The input as a list or tuple.
"""
if not isinstance(items, (list, tuple)):
items = [items]
return items
def _window_calc(self, column, periods, name, func, named_arg, **kwargs):
"""
To be implemented by subclasses. Defines how to add lines resulting
from window calculations.
"""
raise NotImplementedError('To be implemented by subclasses.')
def moving_average(self, column, periods, **kwargs):
"""
Add line(s) for the moving average of a column.
Parameters:
- column: The name of the column to plot.
- periods: The rule or list of rules for resampling,
like '20D' for 20-day periods.
- kwargs: Additional arguments to pass down to the plotting function.
Returns:
A matplotlib `Axes` object.
"""
return self._window_calc(
column, periods, name='MA',
func=pd.DataFrame.resample, named_arg='rule', **kwargs
)
def exp_smoothing(self, column, periods, **kwargs):
"""
Add line(s) for the exponentially smoothed moving average of a column.
Parameters:
- column: The name of the column to plot.
- periods: The span or list of spans for smoothing,
like 20 for 20-day periods.
- kwargs: Additional arguments to pass down to the plotting function.
Returns:
A matplotlib `Axes` object.
"""
return self._window_calc(
column, periods, name='EWMA',
func=pd.DataFrame.ewm, named_arg='span', **kwargs
)
# abstract methods for subclasses to define
def evolution_over_time(self, column, **kwargs):
"""To be implemented by subclasses for generating line plots."""
raise NotImplementedError('To be implemented by subclasses.')
def boxplot(self, **kwargs):
"""To be implemented by subclasses for generating box plots."""
raise NotImplementedError('To be implemented by subclasses.')
def histogram(self, column, **kwargs):
"""To be implemented by subclasses for generating histograms."""
raise NotImplementedError('To be implemented by subclasses.')
def after_hours_trades(self):
"""To be implemented by subclasses for showing the effect of after-hours trading."""
raise NotImplementedError('To be implemented by subclasses.')
def pairplot(self, **kwargs):
"""To be implemented by subclasses for generating pairplots."""
raise NotImplementedError('To be implemented by subclasses.')
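# Illustrative sketch (added; not from the original text): the static helpers can
# annotate any matplotlib Axes. `stock_df` is assumed to be a dataframe with a
# datetime index and a 'close' column, as used throughout this module.
def _example_reference_annotations(stock_df):
    ax = stock_df.plot(y='close')
    Visualizer.add_reference_line(ax, y=stock_df.close.mean(), color='gray',
                                  linestyle='--', label='mean close')
    Visualizer.shade_region(ax, x=(stock_df.index.min(), stock_df.index.max()),
                            color='lightblue', alpha=0.3)
    return ax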
class StockVisualizer(Visualizer):
"""Visualizer for a single stock."""
def evolution_over_time(self, column, **kwargs):
"""
Visualize the evolution over time of a column.
Parameters:
- column: The name of the column to visualize.
- kwargs: Additional keyword arguments to pass down
to the plotting function.
Returns:
A matplotlib `Axes` object.
"""
return self.data.plot.line(y=column, **kwargs)
def boxplot(self, **kwargs):
"""
Generate box plots for all columns.
Parameters:
- kwargs: Additional keyword arguments to pass down
to the plotting function.
Returns:
A matplotlib `Axes` object.
"""
return self.data.plot(kind='box', **kwargs)
def histogram(self, column, **kwargs):
"""
Generate the histogram of a given column.
Parameters:
- column: The name of the column to visualize.
- kwargs: Additional keyword arguments to pass down
to the plotting function.
Returns:
A matplotlib `Axes` object.
"""
return self.data.plot.hist(y=column, **kwargs)
def candlestick(self, date_range=None, resample=None, volume=False, **kwargs):
"""
Create a candlestick plot for the OHLC data with optional aggregation,
subset of the date range, and volume.
Parameters:
- date_range: String or `slice()` of dates to pass to `loc[]`, if `None`
the plot will be for the full range of the data.
- resample: The offset to use for resampling the data, if desired.
        - volume: Whether to show a bar plot for volume traded under the candlesticks.
        - kwargs: Additional keyword arguments to pass down to `mplfinance.plot()`.
        Note: `mplfinance.plot()` doesn't return anything. To save the plot, pass a file path via `savefig`, e.g. `savefig='file.png'`.
"""
if not date_range:
date_range = slice(self.data.index.min(), self.data.index.max())
plot_data = self.data.loc[date_range]
if resample:
agg_dict = {
'open': 'first', 'close': 'last',
'high': 'max', 'low': 'min', 'volume': 'sum'
}
plot_data = plot_data.resample(resample).agg({col: agg_dict[col] for col in plot_data.columns if col in agg_dict})
mpf.plot(plot_data, type='candle', volume=volume, **kwargs)
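    # Example usage (hedged sketch; assumes `viz` wraps OHLCV data with a DatetimeIndex):
    #   viz.candlestick(date_range='2020', resample='1W', volume=True)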
def after_hours_trades(self):
"""
Visualize the effect of after-hours trading on this asset.
Returns:
A matplotlib `Axes` object.
"""
after_hours = self.data.open - self.data.close.shift()
monthly_effect = after_hours.resample('1M').sum()
fig, axes = plt.subplots(1, 2, figsize=(15, 3))
after_hours.plot(
ax=axes[0],
title='After-hours trading\n(Open Price - Prior Day\'s Close)'
).set_ylabel('price')
monthly_effect.index = monthly_effect.index.strftime('%Y-%b')
monthly_effect.plot(
ax=axes[1],
kind='bar',
title='After-hours trading monthly effect',
color=np.where(monthly_effect >= 0, 'g', 'r'),
rot=90
).axhline(0, color='black', linewidth=1)
axes[1].set_ylabel('price')
return axes
@staticmethod
def fill_between(y1, y2, title, label_higher, label_lower, figsize, legend_x):
"""
Visualize the difference between assets.
Parameters:
- y1, y2: Data to be plotted with fill between y2 - y1.
- title: The title for the plot.
- label_higher: String label for when y2 is higher than y1.
- label_lower: String label for when y2 is lower than y1.
- figsize: A tuple of (width, height) for the plot dimensions.
- legend_x: Where to place the legend below the plot.
Returns:
A matplotlib `Axes` object.
"""
is_higher = y2 - y1 > 0
fig = plt.figure(figsize=figsize)
for exclude_mask, color, label in zip(
(is_higher, np.invert(is_higher)),
('g', 'r'),
(label_higher, label_lower)
):
plt.fill_between(
y2.index, y2, y1, figure=fig,
where=exclude_mask, color=color, label=label
)
plt.suptitle(title)
plt.legend(bbox_to_anchor=(legend_x, -0.1), framealpha=0, ncol=2)
for spine in ['top', 'right']:
fig.axes[0].spines[spine].set_visible(False)
return fig.axes[0]
def open_to_close(self, figsize=(10, 4)):
"""
Visualize the daily change in price from open to close.
Parameters:
- figsize: A tuple of (width, height) for the plot dimensions.
Returns:
A matplotlib `Axes` object.
"""
ax = self.fill_between(
self.data.open, self.data.close, figsize=figsize,
legend_x=0.67, title='Daily price change (open to close)',
label_higher='price rose', label_lower='price fell'
)
ax.set_ylabel('price')
return ax
def fill_between_other(self, other_df, figsize=(10, 4)):
"""
Visualize the difference in closing price between assets.
Parameters:
- other_df: The dataframe with the other asset's data.
- figsize: A tuple of (width, height) for the plot dimensions.
Returns:
A matplotlib `Axes` object.
"""
ax = self.fill_between(
            other_df.close, self.data.close, figsize=figsize, legend_x=0.7,
title='Differential between asset closing price (this - other)',
label_higher='asset is higher', label_lower='asset is lower'
)
ax.set_ylabel('price')
return ax
def _window_calc(self, column, periods, name, func, named_arg, **kwargs):
"""
Helper method for plotting a series and adding reference lines using
a window calculation.
Parameters:
- column: The name of the column to plot.
- periods: The rule/span or list of them to pass to the
resampling/smoothing function, like '20D' for 20-day periods
(for resampling) or 20 for a 20-day span (smoothing)
- name: The name of the window calculation (to show in the legend).
- func: The window calculation function.
- named_arg: The name of the argument `periods` is being passed as.
- kwargs: Additional arguments to pass down to the plotting function.
Returns:
A matplotlib `Axes` object.
"""
ax = self.data.plot(y=column, **kwargs)
for period in self._iter_handler(periods):
self.data[column].pipe(
func, **{named_arg: period}
).mean().plot(
ax=ax,
linestyle='--',
label=f'{period if isinstance(period, str) else str(period) + "D"} {name}'
)
plt.legend()
return ax
def pairplot(self, **kwargs):
"""
Generate a seaborn pairplot for this asset.
Parameters:
- kwargs: Keyword arguments to pass down to `sns.pairplot()`
Returns:
A seaborn pairplot
"""
return sns.pairplot(self.data, **kwargs)
def jointplot(self, other, column, **kwargs):
"""
Generate a seaborn jointplot for given column in asset compared to
another asset.
Parameters:
- other: The other asset's dataframe
- column: The column name to use for the comparison.
- kwargs: Keyword arguments to pass down to `sns.jointplot()`
Returns:
A seaborn jointplot
"""
return sns.jointplot(
x=self.data[column],
y=other[column],
**kwargs
)
def correlation_heatmap(self, other):
"""
Plot the correlations between this asset and
another one with a heatmap.
Parameters:
- other: The other dataframe.
Returns:
A seaborn heatmap
"""
corrs = self.data.pct_change().corrwith(other.pct_change())
        corrs = corrs[~pd.isnull(corrs)]
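        # Assumed completion (hedged sketch): render the surviving pairwise correlations of
        # the daily percent changes as a one-row annotated heatmap, per the docstring.
        return sns.heatmap(corrs.to_frame().T, annot=True, center=0, vmin=-1, vmax=1)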
from tensorflow.keras.wrappers.scikit_learn import KerasRegressor
import numpy as np
import sys
import math
import os
import json
import csv
import pandas
import keras
from keras.utils.vis_utils import model_to_dot
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv1D, MaxPooling1D, Flatten, Dense, AveragePooling1D, BatchNormalization, Activation, concatenate, ReLU
from keras.utils.vis_utils import plot_model
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from tensorflow.keras import backend as K
from sklearn.metrics import r2_score
from tensorflow.keras import regularizers
from sklearn.model_selection import train_test_split, StratifiedKFold, cross_val_score, KFold
import tensorflow as tf
from scipy.stats import spearmanr, pearsonr
import matplotlib.pyplot as plt
from data_preprocess import preprocess
from sklearn.utils import shuffle
import random
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.layers import Lambda
from tensorflow import keras
from keras.models import Model
from numpy import newaxis
from sklearn.preprocessing import MinMaxScaler
#Reproducibility
seed = 460
np.random.seed(seed)
tf.random.set_seed(seed)
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
#Create new loss function (Rank mse)
@tf.function()
def rank_mse(yTrue, yPred):
def calculate_loss(yTrue, yPred):
print(f'[INFO] Print yTrue: {yTrue}')
print(f'[INFO] Print yPred: {yPred}')
lambda_value=0.5
size = yTrue.get_shape()[1]
#pass lambda value as tensor
lambda_value = tf.convert_to_tensor(lambda_value,dtype="float32")
#get vector ranks
rank_yTrue = tf.argsort(tf.argsort(yTrue))
rank_yPred = tf.argsort(tf.argsort(yPred))
print(f'[INFO] Print ranked yTrue: {rank_yTrue}')
print(f'[INFO] Print ranked yPred: {rank_yPred}')
#calculate losses
#calculate mse
print(f'\n[INFO] Calculating normal mse')
mse = tf.subtract(yTrue,yPred)
print(f'[INFO] subtract mse: {mse}')
mse = tf.square(mse)
print(f'[INFO] square mse: {mse}')
mse = tf.math.reduce_sum(mse).numpy()
print(f'[INFO] reduce sum mse: {mse}')
mse = tf.divide(mse,size)
print(f'[INFO] divide by size mse: {mse}')
mse = tf.cast(mse,dtype="float32")
print(f'[INFO] final mse: {mse}')
#calculate rank_mse
print(f'\n[INFO] Calculating rank mse')
rank_mse = tf.cast(tf.subtract(rank_yTrue,rank_yPred),dtype="float32")
        print(f'[INFO] subtract rank_mse: {rank_mse}')
rank_mse = tf.square(rank_mse)
print(f'[INFO] square rank_mse: {rank_mse}')
rank_mse = tf.math.reduce_sum(rank_mse).numpy()
print(f'[INFO] reduce sum rank_mse: {rank_mse}')
rank_mse = tf.math.sqrt(rank_mse)
print(f'[INFO] square root rank_mse: {rank_mse}')
rank_mse = tf.divide(rank_mse,size)
print(f'[INFO] divide by size rank_mse: {rank_mse}')
print(f'[INFO] final rank_mse: {rank_mse}')
#(1 - lambda value)* mse(part a of loss)
loss_a = tf.multiply(tf.subtract(tf.ones(1,dtype="float32"),lambda_value),mse)
print(f'\n[INFO] Final loss a: {loss_a}')
#lambda value * rank_mse (part b of loss)
loss_b = tf.multiply(lambda_value,rank_mse)
print(f'[INFO] Final loss b: {loss_b}')
#final loss
loss = tf.add(loss_a,loss_b)
print(f'[INFO] Final loss: {loss}')
return loss
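    # NOTE (assumption): `HiddenPrints` is a stdout-suppressing context manager that is
    # expected to be defined elsewhere in the project; it is not imported in this file.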
debug=True
if not debug:
with HiddenPrints():
loss = calculate_loss(yTrue, yPred)
return loss
else:
loss = calculate_loss(yTrue, yPred)
return loss
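# A minimal NumPy sketch of the quantity computed above (hedged, for intuition only; not
# used by the model): loss = (1 - lambda) * MSE + lambda * ||rank(y) - rank(y_hat)||_2 / n
#   import numpy as np
#   def rank_mse_np(y_true, y_pred, lam=0.5):
#       n = y_true.size
#       mse = np.mean((y_true - y_pred) ** 2)
#       rank_diff = np.argsort(np.argsort(y_true)) - np.argsort(np.argsort(y_pred))
#       return (1 - lam) * mse + lam * np.sqrt(np.sum(rank_diff ** 2)) / n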
class ConvolutionLayer(Conv1D):
def __init__(self, filters,
kernel_size,
data_format,
padding='valid',
activation=None,
use_bias=False,
kernel_initializer='glorot_uniform',
__name__ = 'ConvolutionLayer',
**kwargs):
super(ConvolutionLayer, self).__init__(filters=filters,
kernel_size=kernel_size,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
**kwargs)
self.run_value = 1
def call(self, inputs):
## shape of self.kernel is (12, 4, 512)
##the type of self.kernel is <class 'tensorflow.python.ops.resource_variable_ops.ResourceVariable'>
if self.run_value > 2:
x_tf = self.kernel ##x_tf after reshaping is a tensor and not a weight variable :(
x_tf = tf.transpose(x_tf, [2, 0, 1])
alpha = 100
beta = 1/alpha
bkg = tf.constant([0.295, 0.205, 0.205, 0.295])
bkg_tf = tf.cast(bkg, tf.float32)
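            # Interpretation (hedged): the map_fn below applies a temperature-scaled
            # log-softmax (factor alpha, undone by beta = 1/alpha) across the 4 nucleotide
            # channels of each filter and subtracts the log background frequencies, turning
            # raw kernel weights into log-odds-like position weights before convolution.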
filt_list = tf.map_fn(lambda x:
tf.math.scalar_mul(beta, tf.subtract(tf.subtract(tf.subtract(tf.math.scalar_mul(alpha, x),
tf.expand_dims(tf.math.reduce_max(tf.math.scalar_mul(alpha, x), axis = 1), axis = 1)),
tf.expand_dims(tf.math.log(tf.math.reduce_sum(tf.math.exp(tf.subtract(tf.math.scalar_mul(alpha, x),
tf.expand_dims(tf.math.reduce_max(tf.math.scalar_mul(alpha, x), axis = 1), axis = 1))), axis = 1)), axis = 1)),
tf.math.log(tf.reshape(tf.tile(bkg_tf, [tf.shape(x)[0]]), [tf.shape(x)[0], tf.shape(bkg_tf)[0]])))), x_tf)
#print("type of output from map_fn is", type(filt_list)) ##type of output from map_fn is <class 'tensorflow.python.framework.ops.Tensor'> shape of output from map_fn is (10, 12, 4)
#print("shape of output from map_fn is", filt_list.shape)
#transf = tf.reshape(filt_list, [12, 4, self.filters]) ##12, 4, 512
transf = tf.transpose(filt_list, [1, 2, 0])
##type of transf is <class 'tensorflow.python.framework.ops.Tensor'>
outputs = self._convolution_op(inputs, transf) ## type of outputs is <class 'tensorflow.python.framework.ops.Tensor'>
else:
outputs = self._convolution_op(inputs, self.kernel)
self.run_value += 1
return outputs
class nn_model:
def __init__(self, fasta_file, readout_file, filters, kernel_size, pool_type, regularizer, activation_type, epochs, batch_size, loss_func, optimizer,scaling,model_name):
"""initialize basic parameters"""
self.filters = filters
self.kernel_size = kernel_size
self.pool_type = pool_type
self.regularizer = regularizer
self.activation_type = activation_type
self.epochs = epochs
self.batch_size = batch_size
self.fasta_file = fasta_file
self.readout_file = readout_file
self.loss_func = loss_func
self.optimizer = optimizer
self.scaling = scaling
self.model_name = model_name
#self.eval()
self.cross_val()
#self.cross_val_binning()
def create_model(self):
# different metric functions
def coeff_determination(y_true, y_pred):
SS_res = K.sum(K.square( y_true-y_pred ))
SS_tot = K.sum(K.square(y_true - K.mean(y_true)))
return (1 - SS_res/(SS_tot + K.epsilon()))
def spearman_fn(y_true, y_pred):
return tf.py_function(spearmanr, [tf.cast(y_pred, tf.float32),
tf.cast(y_true, tf.float32)], Tout=tf.float32)
# building model
prep = preprocess(self.fasta_file, self.readout_file)
# if want mono-nucleotide sequences
dict = prep.one_hot_encode()
# if want dinucleotide sequences
#dict = prep.dinucleotide_encode()
readout = dict["readout"]
fw_fasta = dict["forward"]
rc_fasta = dict["reverse"]
dim_num = fw_fasta.shape
# To build this model with the functional API,
# you would start by creating an input node:
forward = keras.Input(shape=(dim_num[1],dim_num[2]), name = 'forward')
reverse = keras.Input(shape=(dim_num[1],dim_num[2]), name = 'reverse')
#first_layer = Conv1D(filters=self.filters, kernel_size=self.kernel_size, data_format='channels_last', input_shape=(dim_num[1],dim_num[2]), use_bias = True)
first_layer = ConvolutionLayer(filters=self.filters, kernel_size=self.kernel_size, strides=1, data_format='channels_last', use_bias = True)
fw = first_layer(forward)
bw = first_layer(reverse)
concat = concatenate([fw, bw], axis=1)
pool_size_input = concat.shape[1]
concat_relu = ReLU()(concat)
if self.pool_type == 'Max':
pool_layer = MaxPooling1D(pool_size=pool_size_input)(concat_relu)
#pool_layer = MaxPooling1D(pool_size=12)(concat_relu)
elif self.pool_type == 'Ave':
pool_layer = AveragePooling1D(pool_size=pool_size_input)(concat_relu)
elif self.pool_type == 'custom':
def out_shape(input_shape):
shape = list(input_shape)
print(input_shape)
shape[0] = 10
return tuple(shape)
#model.add(Lambda(top_k, arguments={'k': 10}))
def top_k(inputs, k):
# tf.nn.top_k Finds values and indices of the k largest entries for the last dimension
print(inputs.shape)
inputs2 = tf.transpose(inputs, [0,2,1])
new_vals = tf.nn.top_k(inputs2, k=k, sorted=True).values
# transform back to (None, 10, 512)
return tf.transpose(new_vals, [0,2,1])
pool_layer = Lambda(top_k, arguments={'k': 2})(concat_relu)
pool_layer = AveragePooling1D(pool_size=2)(pool_layer)
elif self.pool_type == 'custom_sum':
## apply relu function before custom_sum functions
def summed_up(inputs):
#nonzero_vals = tf.keras.backend.relu(inputs)
new_vals = tf.math.reduce_sum(inputs, axis = 1, keepdims = True)
return new_vals
pool_layer = Lambda(summed_up)(concat_relu)
else:
raise NameError('Set the pooling layer name correctly')
#layer = Conv1D(filters=128, kernel_size=12)(pool_layer)
#layer = Dense(16)(pool_layer)
#pool_size_input = layer.shape[1]
#layer = MaxPooling1D(pool_size=pool_size_input)(layer)
# flatten the layer (None, 512)
flat = Flatten()(pool_layer)
if self.activation_type == 'linear':
if self.regularizer == 'L_1':
outputs = Dense(1, kernel_initializer='normal', kernel_regularizer=regularizers.l1(0.001), activation= self.activation_type)(flat)
elif self.regularizer == 'L_2':
outputs = Dense(1, kernel_initializer='normal', kernel_regularizer=regularizers.l2(0.001), activation= self.activation_type)(flat)
else:
raise NameError('Set the regularizer name correctly')
elif self.activation_type =='sigmoid':
outputs = Dense(1, activation= self.activation_type)(flat)
model = keras.Model(inputs=[forward, reverse], outputs=outputs)
model.summary()
if self.loss_func == 'mse':
model.compile(loss='mean_squared_error', optimizer=self.optimizer, metrics = [coeff_determination, spearman_fn])
elif self.loss_func == 'huber':
loss_huber = keras.losses.Huber(delta=1)
model.compile(loss=loss_huber, optimizer=self.optimizer, metrics = [coeff_determination, spearman_fn])
elif self.loss_func == 'mae':
loss_mae = keras.losses.MeanAbsoluteError()
model.compile(loss=loss_mae, optimizer=self.optimizer, metrics = [coeff_determination, spearman_fn])
elif self.loss_func == 'rank_mse':
model.compile(loss=rank_mse, optimizer=self.optimizer, metrics = [coeff_determination, spearman_fn])
elif self.loss_func == 'poisson':
poisson_loss = keras.losses.Poisson()
model.compile(loss=poisson_loss, optimizer=self.optimizer, metrics = [coeff_determination, spearman_fn])
else:
raise NameError('Unrecognized Loss Function')
return model
def eval(self):
# Preprocess the data to one-hot encoded vector
prep = preprocess(self.fasta_file, self.readout_file)
dict = prep.one_hot_encode()
# if want dinucleotide sequences
# dict = prep.dinucleotide_encode()
# print maximum length without truncation
np.set_printoptions(threshold=sys.maxsize)
fw_fasta = dict["forward"]
rc_fasta = dict["reverse"]
readout = dict["readout"]
if self.activation_type == 'linear':
readout = np.log2(readout)
if self.scaling == None:
readout = np.ndarray.tolist(readout)
elif self.scaling == "0_1":
scaler = MinMaxScaler(feature_range=(0,1))
scaler.fit(readout.reshape(-1, 1))
readout = scaler.transform(readout.reshape(-1, 1))
readout = readout.flatten()
readout = np.ndarray.tolist(readout)
# 90% Train, 10% Test
x1_train, x1_test, y1_train, y1_test = train_test_split(fw_fasta, readout, test_size=0.1, random_state=seed)
x2_train, x2_test, y2_train, y2_test = train_test_split(rc_fasta, readout, test_size=0.1, random_state=seed)
model = self.create_model()
# change from list to numpy array
y1_train = np.asarray(y1_train)
y1_test = np.asarray(y1_test)
y2_train = np.asarray(y2_train)
y2_test = np.asarray(y2_test)
# Without early stopping
#history = model.fit({'forward': x1_train, 'reverse': x2_train}, y1_train, epochs=self.epochs, batch_size=self.batch_size, validation_split=0.1)
# Early stopping
#callback = EarlyStopping(monitor='loss', min_delta=0.001, patience=3, verbose=0, mode='auto', baseline=None, restore_best_weights=False)
callback = EarlyStopping(monitor='val_spearman_fn', min_delta=0.0001, patience=3, verbose=0, mode='max', baseline=None, restore_best_weights=False)
history = model.fit({'forward': x1_train, 'reverse': x2_train}, y1_train, epochs=self.epochs, batch_size=self.batch_size, validation_split=0.1, callbacks = [callback])
history2 = model.evaluate({'forward': x1_test, 'reverse': x2_test}, y1_test)
pred = model.predict({'forward': x1_test, 'reverse': x2_test})
#viz_prediction(pred, y1_test, '{} regression model'.format(self.loss_func), '{}2.png'.format(self.loss_func))
print("Seed number is {}".format(seed))
print('metric values of model.evaluate: '+ str(history2))
print('metrics names are ' + str(model.metrics_names))
def cross_val(self):
# Preprocess the data
prep = preprocess(self.fasta_file, self.readout_file)
dict = prep.one_hot_encode()
# If want dinucleotide sequences
#dict = prep.dinucleotide_encode()
fw_fasta = dict["forward"]
rc_fasta = dict["reverse"]
readout = dict["readout"]
names = prep.read_fasta_name_into_array()
if self.activation_type == 'linear':
readout = np.log2(readout)
if self.scaling == 'no_scaling':
readout = np.ndarray.tolist(readout)
elif self.scaling == "0_1":
scaler = MinMaxScaler(feature_range=(0,1))
scaler.fit(readout.reshape(-1, 1))
readout = scaler.transform(readout.reshape(-1, 1))
readout = readout.flatten()
readout = np.ndarray.tolist(readout)
elif self.scaling == "-1_1":
scaler = MinMaxScaler(feature_range=(-1,1))
scaler.fit(readout.reshape(-1, 1))
readout = scaler.transform(readout.reshape(-1, 1))
readout = readout.flatten()
readout = np.ndarray.tolist(readout)
forward_shuffle, readout_shuffle, names_shuffle = shuffle(fw_fasta, readout, names, random_state=seed)
reverse_shuffle, readout_shuffle, names_shuffle = shuffle(rc_fasta, readout, names, random_state=seed)
readout_shuffle = np.array(readout_shuffle)
# initialize metrics to save values
metrics = []
# Provides train/test indices to split data in train/test sets.
kFold = StratifiedKFold(n_splits=10)
ln = np.zeros(len(readout_shuffle))
        pred_vals = pandas.DataFrame()
'''
Pd_exp: Prisoner's Dilemma Experimentation
==========================================
Generate data for numerous Prisoner's Dilemma tournaments or systems of
tournaments.
Classes:
PdTournament
A class to represent a tournament.
PdSystem
Generate data for multiple tournaments and organize it into a dataframe
PdExp
Generate data for multiple systems
Functions:
grouper(iterable, n, fillvalue=None) -> iterable of n-sized-chunks
Collect data into fixed-length chunks or blocks
avg_normalised_state(object, tuple) -> float
Returns the tournament average for given state distribution (e.g.
(C,C), (D,D), (C,D), (D,C))
'''
from axelrod import Action, game, Tournament
import copy
from itertools import zip_longest
import numpy as np
import pandas as pd
from pathlib import Path
import code.settings
import subprocess
# Helper Functions
def grouper(iterable, n, fillvalue=None):
'''
Collect data into fixed-length chunks or blocks
Parameters:
iterable (object): an iterable type
n (int): integer to indicate size of blocks to group iterable units
fillvalue (str): if no more elements are available to create a
block, fillvalue is used to finish final block
Returns:
new-iterable (object): new-iterable that is composed of n-length
blocks of the original iterable.
        ex: grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
'''
args = [iter(iterable)] * n
return zip_longest(*args, fillvalue=fillvalue)
def avg_normalised_state(results_obj, state_tupl):
'''
Returns the tournament average for given state distribution (e.g.
(C,C), (D,D), (C,D), (D,C))
Parameters:
results_obj (object): output generated from Axelrod
tournament.play()
state_tupl (tuple): player-opponent action pair that is the game
state of interest (e.g. (Action.C, Action.C) for mutual
cooperation)
Returns:
(float): average distribution of state_tupl for the tournament
that results_obj describes.
'''
norm_state_dist = results_obj.normalised_state_distribution
num_of_players = len(norm_state_dist)
grd_ttl = 0
for x in norm_state_dist:
for bunch in grouper(x,num_of_players):
totl = 0
for pl in range(num_of_players):
i = bunch[pl]
totl += i[state_tupl] # Each player's CC distribution (one for
# each opponent) is summed together
Ttl=totl/(num_of_players-1) # Normalized across opponents by
# dividing by num_of_players-1
grd_ttl += Ttl
return grd_ttl/num_of_players # Averaged across all players
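# Example usage (hedged sketch): given `results = tournament.play()` from axelrod,
#   avg_cc = avg_normalised_state(results, (Action.C, Action.C))
# returns the tournament-wide average frequency of mutual cooperation.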
class PdTournament:
"""
A class to represent a tournament.
...
Attributes
----------
player_list : list
list of strategy names that also describe the tournament players
names : str
single string that includes the names of all strategies/players
separated by comma
game : axelrod.game (object)
container for game matrix and scoring logic
data : pandas.dataframe (object)
placeholder for the tournament results
Methods
-------
run_tournament(reps=1):
Executes a round-robin tournament with all listed players. Results are
computed and stored in data variable as a pandas dataframe.
save_data(file_name):
Saves tournament data as a csv file
"""
def __init__(self, strategy_list, game=None, reps=1):
"""
Constructs all the necessary attributes for tournament object
Parameters
----------
player_list : list
list of strategy names that also describe the tournament players
game : axelrod.game (object)
container for game matrix and scoring logic (default is None, which
will prompt the classic PD setting)
reps : int
number of times to repeat tournament (default is 1)
"""
self.player_list = strategy_list
self.names = ','.join(sorted([n.name for n in strategy_list]))
self.game = game
self.data = self.run_tournament(reps) # If reps=1, then data will be
# one row. If reps >1, then data
# will be multiple rows
def __repr__(self):
return self.names
def run_tournament(self, reps=1):
"""
Executes a round-robin tournament with all listed players.
Results are computed and stored in data attribute as a pandas dataframe.
Parameters
----------
reps : int
number of times to repeat tournament (default is 1)
Returns
-------
data_row : pandas.dataframe (object)
row representation of individual tournament results, which consists of
tournament players, player statistics and tournament metrics
"""
# Instantiate tournament object
roster = self.player_list
print('Instantiating tournament object with these players: ', self.names)
tourn = Tournament(players=roster,
game=self.game,
prob_end=0.1,
turns=30,
repetitions=reps,
seed=1)
results = tourn.play(processes=0)
# Collect Group Outcome Metrics
normal_scores = results.normalised_scores
avg_norm_score = np.average(normal_scores)
min_norm_score = np.amin(normal_scores)
avg_norm_cc_distribution = avg_normalised_state(results, (Action.C,Action.C))
data = [self.names,
avg_norm_score,
min_norm_score,
avg_norm_cc_distribution]
col = ['Tournament_Members',
'Avg_Norm_Score',
'Min_Norm_Score',
'Avg_Norm_CC_Distribution']
# List manipulation to identify individual players in separate columns
sorted_list = sorted([n.name for n in roster])
pl_list = list()
for num, p in enumerate(sorted_list,1):
pl_list.append(f'Player{num}')
pl_list.append(f'P{num}_Norm_Score')
pl_data_list = list()
for name, score in zip(sorted_list, normal_scores):
pl_data_list.append(name)
pl_data_list.append(score[0])
data = [data[0]]+pl_data_list+data[1:]
col = [col[0]]+pl_list+col[1:]
# Store data in pandas dataframe
data_row = pd.DataFrame([data], columns=col)
#self.data = data_row
return data_row
def save_data(self, file_name):
""" Saves tournament data as a csv file """
if self.game is None:
R,P,S,T = game.Game().RPST()
else:
R,P,S,T = self.game.RPST()
self.data.to_csv(file_name+f'_gameRPST_{R!r}_{P!r}_{S!r}_{T!r}.csv',
index=False)
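# Example usage (hedged sketch; assumes the `axelrod` library's strategy classes):
#   import axelrod as axl
#   tourney = PdTournament([axl.TitForTat(), axl.Defector(), axl.Cooperator()], reps=5)
#   tourney.save_data('results/three_player')  # appends the game's RPST values to the filename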
class PdSystem:
"""
A class to represent a system of tournaments.
...
Attributes
----------
game : axelrod.game (object)
container for game matrix and scoring logic
data : pandas.dataframe (object)
placeholder for system data
    id : str or None
        placeholder for an optional system identifier (set to None by the constructor)
team_dict : dictionary
container to hold team-tournament object pairs
Methods
-------
compute_data:
Concatenates each individual team dataframe, computes the system
metrics, and then assigns a single dataframe to data attribute
save_data(file_name):
Saves system data as a csv file
"""
def __init__(self, team_list, game_type=None):
"""
Constructs all the necessary attributes for system object
Parameters
----------
team_list : list
a two-dimensional list where each item of the first-dimension is a
player_list for a single tournament
game_type : axelrod.game (object)
container for game matrix and scoring logic (default is None, which
will prompt the classic PD setting)
"""
self.data = None
self.id = None
self.game = game_type
tournament_dict = dict()
# Loop through team list and construct tournament instances
# for each team. Save each team to the tournament dictionary
for num, team in enumerate(team_list,1):
player_list = [code.settings.name_strategy_dict[i] for i in team]
new_tour = PdTournament(player_list, game_type)
tournament_dict[f'Team{num}'] = new_tour
self.team_dict = tournament_dict
def compute_data(self):
'''
Concatenates each individual team dataframe, computes the system
metrics, and then assigns a single dataframe to data attribute
'''
first = True
for key, value in self.team_dict.items():
# renaming columns to tournament data frame
df = value.data.rename(columns={'Tournament_Members': key,
'Avg_Norm_Score': f'{key} Avg Score',
'Min_Norm_Score': f'{key} Min Score',
'Avg_Norm_CC_Distribution': f'{key} Avg CC Dist'})
if first:
df1 = df
first = False
else:
df1 = pd.concat([df1,df], axis=1)
df1.index += 1 # Initialize index from 1 instead of 0
# Collect team data
min_scores = [df1[f'{i} Min Score'].values for i in list(self.team_dict)]
avg_scores = [df1[f'{i} Avg Score'].values for i in list(self.team_dict)]
cc_dists = [df1[f'{i} Avg CC Dist'].values for i in list(self.team_dict)]
# Compute system metrics and create new data frame
sys_df = pd.DataFrame({'SYS MIN Score' : [np.amin(min_scores)],
'SYS AVG Score' : [np.average(avg_scores)],
'MIN of Team Avgs' : [np.amin(avg_scores)],
'AVG of Team Mins' : [np.average(min_scores)],
'SYS CC Dist AVG' : [np.average(cc_dists)],
'SYS CC Dist MIN' : [np.amin(cc_dists)]},
index=[1])
# Concatenate two data frames
        sys_df = pd.concat([sys_df, df1], axis=1)
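        # Assumed completion (hedged): per the docstring, the combined team/system frame is
        # assigned to the data attribute for later saving.
        self.data = sys_df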
# LIBRARIES
# set up backend for ssh -x11 figures
import matplotlib
matplotlib.use('Agg')
# read and write
import os
import sys
import glob
import re
import fnmatch
import csv
import shutil
from datetime import datetime
# maths
import numpy as np
import pandas as pd
import math
import random
# miscellaneous
import warnings
import gc
import timeit
# sklearn
from sklearn.utils import resample
from sklearn.preprocessing import OneHotEncoder
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score, log_loss, roc_auc_score, \
accuracy_score, f1_score, precision_score, recall_score, confusion_matrix, average_precision_score
from sklearn.utils.validation import check_is_fitted
from sklearn.model_selection import KFold, PredefinedSplit, cross_validate
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LinearRegression, ElasticNet
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.neural_network import MLPRegressor
from sklearn.preprocessing import StandardScaler
# Statistics
from scipy.stats import pearsonr, ttest_rel, norm
# Other tools for ensemble models building (<NAME>'s InnerCV class)
from hyperopt import fmin, tpe, space_eval, Trials, hp, STATUS_OK
from xgboost import XGBRegressor
from lightgbm import LGBMRegressor
# CPUs
from multiprocessing import Pool
# GPUs
from GPUtil import GPUtil
# tensorflow
import tensorflow as tf
# keras
from keras_preprocessing.image import ImageDataGenerator, Iterator
from keras_preprocessing.image.utils import load_img, img_to_array, array_to_img
from tensorflow.keras.utils import Sequence
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import Flatten, Dense, Dropout, GlobalAveragePooling2D, concatenate
from tensorflow.keras import regularizers
from tensorflow.keras.optimizers import Adam, RMSprop, Adadelta
from tensorflow.keras.callbacks import Callback, EarlyStopping, ReduceLROnPlateau, ModelCheckpoint, CSVLogger
from tensorflow.keras.losses import MeanSquaredError, BinaryCrossentropy
from tensorflow.keras.metrics import RootMeanSquaredError, MeanAbsoluteError, AUC, BinaryAccuracy, Precision, Recall, \
TruePositives, FalsePositives, FalseNegatives, TrueNegatives
from tensorflow_addons.metrics import RSquare, F1Score
# Plots
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from PIL import Image
from bioinfokit import visuz
# Model's attention
from keract import get_activations, get_gradients_of_activations
from scipy.ndimage.interpolation import zoom
# Survival
from lifelines.utils import concordance_index
# Necessary to define MyCSVLogger
import collections
import csv
import io
import six
from tensorflow.python.lib.io import file_io
from tensorflow.python.util.compat import collections_abc
from tensorflow.keras.backend import eval
# Set display parameters
pd.set_option('display.max_rows', 200)
# CLASSES
class Basics:
"""
Root class herited by most other class. Includes handy helper functions
"""
def __init__(self):
# seeds for reproducibility
self.seed = 0
os.environ['PYTHONHASHSEED'] = str(self.seed)
np.random.seed(self.seed)
random.seed(self.seed)
# other parameters
self.path_data = '../data/'
self.folds = ['train', 'val', 'test']
self.n_CV_outer_folds = 10
self.outer_folds = [str(x) for x in list(range(self.n_CV_outer_folds))]
self.modes = ['', '_sd', '_str']
self.id_vars = ['id', 'eid', 'instance', 'outer_fold']
self.instances = ['0', '1', '1.5', '1.51', '1.52', '1.53', '1.54', '2', '3']
self.ethnicities_vars_forgot_Other = \
['Ethnicity.White', 'Ethnicity.British', 'Ethnicity.Irish', 'Ethnicity.White_Other', 'Ethnicity.Mixed',
'Ethnicity.White_and_Black_Caribbean', 'Ethnicity.White_and_Black_African', 'Ethnicity.White_and_Asian',
'Ethnicity.Mixed_Other', 'Ethnicity.Asian', 'Ethnicity.Indian', 'Ethnicity.Pakistani',
'Ethnicity.Bangladeshi', 'Ethnicity.Asian_Other', 'Ethnicity.Black', 'Ethnicity.Caribbean',
'Ethnicity.African', 'Ethnicity.Black_Other', 'Ethnicity.Chinese', 'Ethnicity.Other_ethnicity',
'Ethnicity.Do_not_know', 'Ethnicity.Prefer_not_to_answer', 'Ethnicity.NA']
self.ethnicities_vars = \
['Ethnicity.White', 'Ethnicity.British', 'Ethnicity.Irish', 'Ethnicity.White_Other', 'Ethnicity.Mixed',
'Ethnicity.White_and_Black_Caribbean', 'Ethnicity.White_and_Black_African', 'Ethnicity.White_and_Asian',
'Ethnicity.Mixed_Other', 'Ethnicity.Asian', 'Ethnicity.Indian', 'Ethnicity.Pakistani',
'Ethnicity.Bangladeshi', 'Ethnicity.Asian_Other', 'Ethnicity.Black', 'Ethnicity.Caribbean',
'Ethnicity.African', 'Ethnicity.Black_Other', 'Ethnicity.Chinese', 'Ethnicity.Other',
'Ethnicity.Other_ethnicity', 'Ethnicity.Do_not_know', 'Ethnicity.Prefer_not_to_answer', 'Ethnicity.NA']
self.demographic_vars = ['Age', 'Sex'] + self.ethnicities_vars
self.names_model_parameters = ['target', 'organ', 'view', 'transformation', 'architecture', 'n_fc_layers',
'n_fc_nodes', 'optimizer', 'learning_rate', 'weight_decay', 'dropout_rate',
'data_augmentation_factor']
self.targets_regression = ['Age']
self.targets_binary = ['Sex']
self.models_types = ['', '_bestmodels']
self.dict_prediction_types = {'Age': 'regression', 'Sex': 'binary'}
self.dict_side_predictors = {'Age': ['Sex'] + self.ethnicities_vars_forgot_Other,
'Sex': ['Age'] + self.ethnicities_vars_forgot_Other}
self.organs = ['Brain', 'Eyes', 'Arterial', 'Heart', 'Abdomen', 'Musculoskeletal']
self.left_right_organs_views = ['Eyes_Fundus', 'Eyes_OCT', 'Arterial_Carotids', 'Musculoskeletal_Hips',
'Musculoskeletal_Knees']
self.dict_organs_to_views = {'Brain': ['MRI'],
'Eyes': ['Fundus', 'OCT'],
'Arterial': ['Carotids'],
'Heart': ['MRI'],
'Abdomen': ['Liver', 'Pancreas'],
'Musculoskeletal': ['Spine', 'Hips', 'Knees', 'FullBody'],
'PhysicalActivity': ['FullWeek']}
self.dict_organsviews_to_transformations = \
{'Brain_MRI': ['SagittalRaw', 'SagittalReference', 'CoronalRaw', 'CoronalReference', 'TransverseRaw',
'TransverseReference'],
'Arterial_Carotids': ['Mixed', 'LongAxis', 'CIMT120', 'CIMT150', 'ShortAxis'],
'Heart_MRI': ['2chambersRaw', '2chambersContrast', '3chambersRaw', '3chambersContrast', '4chambersRaw',
'4chambersContrast'],
'Musculoskeletal_Spine': ['Sagittal', 'Coronal'],
'Musculoskeletal_FullBody': ['Mixed', 'Figure', 'Skeleton', 'Flesh'],
'PhysicalActivity_FullWeek': ['GramianAngularField1minDifference', 'GramianAngularField1minSummation',
'MarkovTransitionField1min', 'RecurrencePlots1min']}
self.dict_organsviews_to_transformations.update(dict.fromkeys(['Eyes_Fundus', 'Eyes_OCT'], ['Raw']))
self.dict_organsviews_to_transformations.update(
dict.fromkeys(['Abdomen_Liver', 'Abdomen_Pancreas'], ['Raw', 'Contrast']))
self.dict_organsviews_to_transformations.update(
dict.fromkeys(['Musculoskeletal_Hips', 'Musculoskeletal_Knees'], ['MRI']))
self.organsviews_not_to_augment = []
self.organs_instances23 = ['Brain', 'Eyes', 'Arterial', 'Heart', 'Abdomen', 'Musculoskeletal',
'PhysicalActivity']
self.organs_XWAS = \
['*', '*instances01', '*instances1.5x', '*instances23', 'Brain', 'BrainCognitive', 'BrainMRI', 'Eyes',
'EyesFundus', 'EyesOCT', 'Hearing', 'Lungs', 'Arterial', 'ArterialPulseWaveAnalysis', 'ArterialCarotids',
'Heart', 'HeartECG', 'HeartMRI', 'Abdomen', 'AbdomenLiver', 'AbdomenPancreas', 'Musculoskeletal',
'MusculoskeletalSpine', 'MusculoskeletalHips', 'MusculoskeletalKnees', 'MusculoskeletalFullBody',
'MusculoskeletalScalars', 'PhysicalActivity', 'Biochemistry', 'BiochemistryUrine', 'BiochemistryBlood',
'ImmuneSystem']
# Others
if '/Users/Alan/' in os.getcwd():
os.chdir('/Users/Alan/Desktop/Aging/Medical_Images/scripts/')
else:
os.chdir('/n/groups/patel/Alan/Aging/Medical_Images/scripts/')
gc.enable() # garbage collector
warnings.filterwarnings('ignore')
def _version_to_parameters(self, model_name):
parameters = {}
parameters_list = model_name.split('_')
for i, parameter in enumerate(self.names_model_parameters):
parameters[parameter] = parameters_list[i]
if len(parameters_list) > 11:
parameters['outer_fold'] = parameters_list[11]
return parameters
@staticmethod
def _parameters_to_version(parameters):
return '_'.join(parameters.values())
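    # Example (hedged, illustrative values): a model version joins the 12 parameters above
    # with underscores, e.g. 'Age_Heart_MRI_4chambersRaw_InceptionResNetV2_1_1024_Adam_0.0001_0.1_0.5_1.0',
    # optionally followed by '_<outer_fold>'; _version_to_parameters inverts this mapping.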
@staticmethod
def convert_string_to_boolean(string):
if string == 'True':
boolean = True
elif string == 'False':
boolean = False
else:
print('ERROR: string must be either \'True\' or \'False\'')
sys.exit(1)
return boolean
class Metrics(Basics):
"""
Helper class defining dictionaries of metrics and custom metrics
"""
def __init__(self):
# Parameters
Basics.__init__(self)
self.metrics_displayed_in_int = ['True-Positives', 'True-Negatives', 'False-Positives', 'False-Negatives']
self.metrics_needing_classpred = ['F1-Score', 'Binary-Accuracy', 'Precision', 'Recall']
self.dict_metrics_names_K = {'regression': ['RMSE'], # For now, R-Square is buggy. Try again in a few months.
'binary': ['ROC-AUC', 'PR-AUC', 'F1-Score', 'Binary-Accuracy', 'Precision',
'Recall', 'True-Positives', 'False-Positives', 'False-Negatives',
'True-Negatives'],
'multiclass': ['Categorical-Accuracy']}
self.dict_metrics_names = {'regression': ['RMSE', 'MAE', 'R-Squared', 'Pearson-Correlation'],
'binary': ['ROC-AUC', 'F1-Score', 'PR-AUC', 'Binary-Accuracy', 'Sensitivity',
'Specificity', 'Precision', 'Recall', 'True-Positives', 'False-Positives',
'False-Negatives', 'True-Negatives'],
'multiclass': ['Categorical-Accuracy']}
self.dict_losses_names = {'regression': 'MSE', 'binary': 'Binary-Crossentropy',
'multiclass': 'categorical_crossentropy'}
self.dict_main_metrics_names_K = {'Age': 'MAE', 'Sex': 'PR-AUC', 'imbalanced_binary_placeholder': 'PR-AUC'}
self.dict_main_metrics_names = {'Age': 'R-Squared', 'Sex': 'ROC-AUC',
'imbalanced_binary_placeholder': 'PR-AUC'}
self.main_metrics_modes = {'loss': 'min', 'R-Squared': 'max', 'Pearson-Correlation': 'max', 'RMSE': 'min',
'MAE': 'min', 'ROC-AUC': 'max', 'PR-AUC': 'max', 'F1-Score': 'max', 'C-Index': 'max',
'C-Index-difference': 'max'}
self.n_bootstrap_iterations = 1000
def rmse(y_true, y_pred):
return math.sqrt(mean_squared_error(y_true, y_pred))
def sensitivity_score(y, pred):
_, _, fn, tp = confusion_matrix(y, pred.round()).ravel()
return tp / (tp + fn)
def specificity_score(y, pred):
tn, fp, _, _ = confusion_matrix(y, pred.round()).ravel()
return tn / (tn + fp)
def true_positives_score(y, pred):
_, _, _, tp = confusion_matrix(y, pred.round()).ravel()
return tp
def false_positives_score(y, pred):
_, fp, _, _ = confusion_matrix(y, pred.round()).ravel()
return fp
def false_negatives_score(y, pred):
_, _, fn, _ = confusion_matrix(y, pred.round()).ravel()
return fn
def true_negatives_score(y, pred):
tn, _, _, _ = confusion_matrix(y, pred.round()).ravel()
return tn
self.dict_metrics_sklearn = {'mean_squared_error': mean_squared_error,
'mean_absolute_error': mean_absolute_error,
'RMSE': rmse,
'Pearson-Correlation': pearsonr,
'R-Squared': r2_score,
'Binary-Crossentropy': log_loss,
'ROC-AUC': roc_auc_score,
'F1-Score': f1_score,
'PR-AUC': average_precision_score,
'Binary-Accuracy': accuracy_score,
'Sensitivity': sensitivity_score,
'Specificity': specificity_score,
'Precision': precision_score,
'Recall': recall_score,
'True-Positives': true_positives_score,
'False-Positives': false_positives_score,
'False-Negatives': false_negatives_score,
'True-Negatives': true_negatives_score}
def _bootstrap(self, data, function):
results = []
for i in range(self.n_bootstrap_iterations):
data_i = resample(data, replace=True, n_samples=len(data.index))
results.append(function(data_i['y'], data_i['pred']))
return np.mean(results), np.std(results)
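    # Example usage (hedged sketch): given a DataFrame `df` with 'y' and 'pred' columns,
    #   mean_r2, sd_r2 = self._bootstrap(df, r2_score)
    # yields the bootstrap mean and standard deviation of the chosen metric.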
class PreprocessingMain(Basics):
"""
This class executes the code for step 01. It preprocesses the main dataframe by:
- reformating the rows and columns
- splitting the dataset into folds for the future cross validations
- imputing key missing data
- adding a new UKB instance for physical activity data
- formating the demographics columns (age, sex and ethnicity)
- reformating the dataframe so that different instances of the same participant are treated as different rows
- saving the dataframe
"""
def __init__(self):
Basics.__init__(self)
self.data_raw = None
self.data_features = None
self.data_features_eids = None
def _add_outer_folds(self):
outer_folds_split = pd.read_csv(self.path_data + 'All_eids.csv')
outer_folds_split.rename(columns={'fold': 'outer_fold'}, inplace=True)
outer_folds_split['eid'] = outer_folds_split['eid'].astype('str')
outer_folds_split['outer_fold'] = outer_folds_split['outer_fold'].astype('str')
outer_folds_split.set_index('eid', inplace=True)
self.data_raw = self.data_raw.join(outer_folds_split)
def _impute_missing_ecg_instances(self):
data_ecgs = pd.read_csv('/n/groups/patel/Alan/Aging/TimeSeries/scripts/age_analysis/missing_samples.csv')
data_ecgs['eid'] = data_ecgs['eid'].astype(str)
data_ecgs['instance'] = data_ecgs['instance'].astype(str)
for _, row in data_ecgs.iterrows():
self.data_raw.loc[row['eid'], 'Date_attended_center_' + row['instance']] = row['observation_date']
def _add_physicalactivity_instances(self):
data_pa = pd.read_csv(
'/n/groups/patel/Alan/Aging/TimeSeries/series/PhysicalActivity/90001/features/PA_visit_date.csv')
data_pa['eid'] = data_pa['eid'].astype(str)
data_pa.set_index('eid', drop=False, inplace=True)
data_pa.index.name = 'column_names'
self.data_raw = self.data_raw.merge(data_pa, on=['eid'], how='outer')
self.data_raw.set_index('eid', drop=False, inplace=True)
def _compute_sex(self):
# Use genetic sex when available
self.data_raw['Sex_genetic'][self.data_raw['Sex_genetic'].isna()] = \
self.data_raw['Sex'][self.data_raw['Sex_genetic'].isna()]
self.data_raw.drop(['Sex'], axis=1, inplace=True)
self.data_raw.rename(columns={'Sex_genetic': 'Sex'}, inplace=True)
self.data_raw.dropna(subset=['Sex'], inplace=True)
def _compute_age(self):
# Recompute age with greater precision by leveraging the month of birth
self.data_raw['Year_of_birth'] = self.data_raw['Year_of_birth'].astype(int)
self.data_raw['Month_of_birth'] = self.data_raw['Month_of_birth'].astype(int)
self.data_raw['Date_of_birth'] = self.data_raw.apply(
lambda row: datetime(row.Year_of_birth, row.Month_of_birth, 15), axis=1)
for i in self.instances:
self.data_raw['Date_attended_center_' + i] = \
self.data_raw['Date_attended_center_' + i].apply(
lambda x: pd.NaT if pd.isna(x) else datetime.strptime(x, '%Y-%m-%d'))
self.data_raw['Age_' + i] = self.data_raw['Date_attended_center_' + i] - self.data_raw['Date_of_birth']
self.data_raw['Age_' + i] = self.data_raw['Age_' + i].dt.days / 365.25
self.data_raw.drop(['Date_attended_center_' + i], axis=1, inplace=True)
self.data_raw.drop(['Year_of_birth', 'Month_of_birth', 'Date_of_birth'], axis=1, inplace=True)
self.data_raw.dropna(how='all', subset=['Age_0', 'Age_1', 'Age_1.5', 'Age_1.51', 'Age_1.52', 'Age_1.53',
'Age_1.54', 'Age_2', 'Age_3'], inplace=True)
def _encode_ethnicity(self):
# Fill NAs for ethnicity on instance 0 if available in other instances
eids_missing_ethnicity = self.data_raw['eid'][self.data_raw['Ethnicity'].isna()]
for eid in eids_missing_ethnicity:
sample = self.data_raw.loc[eid, :]
if not math.isnan(sample['Ethnicity_1']):
self.data_raw.loc[eid, 'Ethnicity'] = self.data_raw.loc[eid, 'Ethnicity_1']
elif not math.isnan(sample['Ethnicity_2']):
self.data_raw.loc[eid, 'Ethnicity'] = self.data_raw.loc[eid, 'Ethnicity_2']
self.data_raw.drop(['Ethnicity_1', 'Ethnicity_2'], axis=1, inplace=True)
# One hot encode ethnicity
dict_ethnicity_codes = {'1': 'Ethnicity.White', '1001': 'Ethnicity.British', '1002': 'Ethnicity.Irish',
'1003': 'Ethnicity.White_Other',
'2': 'Ethnicity.Mixed', '2001': 'Ethnicity.White_and_Black_Caribbean',
'2002': 'Ethnicity.White_and_Black_African',
'2003': 'Ethnicity.White_and_Asian', '2004': 'Ethnicity.Mixed_Other',
'3': 'Ethnicity.Asian', '3001': 'Ethnicity.Indian', '3002': 'Ethnicity.Pakistani',
'3003': 'Ethnicity.Bangladeshi', '3004': 'Ethnicity.Asian_Other',
'4': 'Ethnicity.Black', '4001': 'Ethnicity.Caribbean', '4002': 'Ethnicity.African',
'4003': 'Ethnicity.Black_Other',
'5': 'Ethnicity.Chinese',
'6': 'Ethnicity.Other_ethnicity',
'-1': 'Ethnicity.Do_not_know',
'-3': 'Ethnicity.Prefer_not_to_answer',
'-5': 'Ethnicity.NA'}
self.data_raw['Ethnicity'] = self.data_raw['Ethnicity'].fillna(-5).astype(int).astype(str)
ethnicities = pd.get_dummies(self.data_raw['Ethnicity'])
self.data_raw.drop(['Ethnicity'], axis=1, inplace=True)
ethnicities.rename(columns=dict_ethnicity_codes, inplace=True)
ethnicities['Ethnicity.White'] = ethnicities['Ethnicity.White'] + ethnicities['Ethnicity.British'] + \
ethnicities['Ethnicity.Irish'] + ethnicities['Ethnicity.White_Other']
ethnicities['Ethnicity.Mixed'] = ethnicities['Ethnicity.Mixed'] + \
ethnicities['Ethnicity.White_and_Black_Caribbean'] + \
ethnicities['Ethnicity.White_and_Black_African'] + \
ethnicities['Ethnicity.White_and_Asian'] + \
ethnicities['Ethnicity.Mixed_Other']
ethnicities['Ethnicity.Asian'] = ethnicities['Ethnicity.Asian'] + ethnicities['Ethnicity.Indian'] + \
ethnicities['Ethnicity.Pakistani'] + ethnicities['Ethnicity.Bangladeshi'] + \
ethnicities['Ethnicity.Asian_Other']
ethnicities['Ethnicity.Black'] = ethnicities['Ethnicity.Black'] + ethnicities['Ethnicity.Caribbean'] + \
ethnicities['Ethnicity.African'] + ethnicities['Ethnicity.Black_Other']
ethnicities['Ethnicity.Other'] = ethnicities['Ethnicity.Other_ethnicity'] + \
ethnicities['Ethnicity.Do_not_know'] + \
ethnicities['Ethnicity.Prefer_not_to_answer'] + \
ethnicities['Ethnicity.NA']
self.data_raw = self.data_raw.join(ethnicities)
def generate_data(self):
# Preprocessing
dict_UKB_fields_to_names = {'34-0.0': 'Year_of_birth', '52-0.0': 'Month_of_birth',
'53-0.0': 'Date_attended_center_0', '53-1.0': 'Date_attended_center_1',
'53-2.0': 'Date_attended_center_2', '53-3.0': 'Date_attended_center_3',
'31-0.0': 'Sex', '22001-0.0': 'Sex_genetic', '21000-0.0': 'Ethnicity',
'21000-1.0': 'Ethnicity_1', '21000-2.0': 'Ethnicity_2',
'22414-2.0': 'Abdominal_images_quality'}
self.data_raw = pd.read_csv('/n/groups/patel/uk_biobank/project_52887_41230/ukb41230.csv',
usecols=['eid', '31-0.0', '22001-0.0', '21000-0.0', '21000-1.0', '21000-2.0',
'34-0.0', '52-0.0', '53-0.0', '53-1.0', '53-2.0', '53-3.0', '22414-2.0'])
# Formatting
self.data_raw.rename(columns=dict_UKB_fields_to_names, inplace=True)
self.data_raw['eid'] = self.data_raw['eid'].astype(str)
self.data_raw.set_index('eid', drop=False, inplace=True)
self.data_raw.index.name = 'column_names'
self._add_outer_folds()
self._impute_missing_ecg_instances()
self._add_physicalactivity_instances()
self._compute_sex()
self._compute_age()
self._encode_ethnicity()
# Concatenate the data from the different instances
self.data_features = None
for i in self.instances:
print('Preparing the samples for instance ' + i)
df_i = self.data_raw[['eid', 'outer_fold', 'Age_' + i, 'Sex'] + self.ethnicities_vars +
['Abdominal_images_quality']].dropna(subset=['Age_' + i])
print(str(len(df_i.index)) + ' samples found in instance ' + i)
df_i.rename(columns={'Age_' + i: 'Age'}, inplace=True)
df_i['instance'] = i
df_i['id'] = df_i['eid'] + '_' + df_i['instance']
df_i = df_i[self.id_vars + self.demographic_vars + ['Abdominal_images_quality']]
if i != '2':
df_i['Abdominal_images_quality'] = np.nan # not defined for instance 3, not relevant for instances 0, 1
if self.data_features is None:
self.data_features = df_i
else:
self.data_features = self.data_features.append(df_i)
print('The size of the full concatenated dataframe is now ' + str(len(self.data_features.index)))
# Save age as a float32 instead of float64
self.data_features['Age'] = np.float32(self.data_features['Age'])
# Shuffle the rows before saving the dataframe
self.data_features = self.data_features.sample(frac=1)
# Generate dataframe for eids pipeline as opposed to instances pipeline
self.data_features_eids = self.data_features[self.data_features.instance == '0']
self.data_features_eids['instance'] = '*'
self.data_features_eids['id'] = [ID.replace('_0', '_*') for ID in self.data_features_eids['id'].values]
def save_data(self):
self.data_features.to_csv(self.path_data + 'data-features_instances.csv', index=False)
self.data_features_eids.to_csv(self.path_data + 'data-features_eids.csv', index=False)
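# Example usage (hedged sketch):
#   prep = PreprocessingMain()
#   prep.generate_data()
#   prep.save_data()  # writes data-features_instances.csv and data-features_eids.csv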
class PreprocessingImagesIDs(Basics):
"""
Splits the different images datasets into folds for the future cross validation
"""
def __init__(self):
Basics.__init__(self)
# Instances 2 and 3 datasets (most medical images, mostly medical images)
self.instances23_eids = None
self.HEART_EIDs = None
self.heart_eids = None
self.FOLDS_23_EIDS = None
def _load_23_eids(self):
data_features = pd.read_csv(self.path_data + 'data-features_instances.csv')
images_eids = data_features['eid'][data_features['instance'].isin([2, 3])]
self.images_eids = list(set(images_eids))
def _load_heart_eids(self):
# IDs already used in Heart videos
HEART_EIDS = {}
heart_eids = []
for i in range(10):
# Important: The i's data fold is used as *validation* fold for outer fold i.
data_i = pd.read_csv(
"/n/groups/patel/JbProst/Heart/Data/FoldsAugmented/data-features_Heart_20208_Augmented_Age_val_" + str(
i) + ".csv")
HEART_EIDS[i] = list(set([int(str(ID)[:7]) for ID in data_i['eid']]))
heart_eids = heart_eids + HEART_EIDS[i]
self.HEART_EIDS = HEART_EIDS
self.heart_eids = heart_eids
def _split_23_eids_folds(self):
self._load_23_eids()
self._load_heart_eids()
# List extra images ids, and split them between the different folds.
extra_eids = [eid for eid in self.images_eids if eid not in self.heart_eids]
random.shuffle(extra_eids)
n_samples = len(extra_eids)
n_samples_by_fold = n_samples / self.n_CV_outer_folds
FOLDS_EXTRAEIDS = {}
FOLDS_EIDS = {}
for outer_fold in self.outer_folds:
FOLDS_EXTRAEIDS[outer_fold] = \
extra_eids[int((int(outer_fold)) * n_samples_by_fold):int((int(outer_fold) + 1) * n_samples_by_fold)]
FOLDS_EIDS[outer_fold] = self.HEART_EIDS[int(outer_fold)] + FOLDS_EXTRAEIDS[outer_fold]
self.FOLDS_23_EIDS = FOLDS_EIDS
def _save_23_eids_folds(self):
for outer_fold in self.outer_folds:
with open(self.path_data + 'instances23_eids_' + outer_fold + '.csv', 'w', newline='') as myfile:
wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)
wr.writerow(self.FOLDS_23_EIDS[outer_fold])
def generate_eids_splits(self):
print("Generating eids split for organs on instances 2 and 3")
self._split_23_eids_folds()
self._save_23_eids_folds()
class PreprocessingFolds(Metrics):
"""
Splits the data into training, validation and testing sets for all CV folds
"""
def __init__(self, target, organ, regenerate_data):
Metrics.__init__(self)
self.target = target
self.organ = organ
self.list_ids_per_view_transformation = None
# Check if these folds have already been generated
if not regenerate_data:
if len(glob.glob(self.path_data + 'data-features_' + organ + '_*_' + target + '_*.csv')) > 0:
print("Error: The files already exist! Either change regenerate_data to True or delete the previous"
" version.")
sys.exit(1)
self.side_predictors = self.dict_side_predictors[target]
self.variables_to_normalize = self.side_predictors
if target in self.targets_regression:
self.variables_to_normalize.append(target)
self.dict_image_quality_col = {'Liver': 'Abdominal_images_quality'}
self.dict_image_quality_col.update(
dict.fromkeys(['Brain', 'Eyes', 'Arterial', 'Heart', 'Abdomen', 'Musculoskeletal', 'PhysicalActivity'],
None))
self.image_quality_col = self.dict_image_quality_col[organ]
self.views = self.dict_organs_to_views[organ]
self.list_ids = None
self.list_ids_per_view = {}
self.data = None
self.EIDS = None
self.EIDS_per_view = {'train': {}, 'val': {}, 'test': {}}
self.data_fold = None
def _get_list_ids(self):
self.list_ids_per_view_transformation = {}
list_ids = []
# if different views are available, take the union of the ids
for view in self.views:
self.list_ids_per_view_transformation[view] = {}
for transformation in self.dict_organsviews_to_transformations[self.organ + '_' + view]:
list_ids_transformation = []
path = '../images/' + self.organ + '/' + view + '/' + transformation + '/'
# for paired organs, take the unions of the ids available on the right and the left sides
if self.organ + '_' + view in self.left_right_organs_views:
for side in ['right', 'left']:
list_ids_transformation += os.listdir(path + side + '/')
list_ids_transformation = np.unique(list_ids_transformation).tolist()
else:
list_ids_transformation += os.listdir(path)
self.list_ids_per_view_transformation[view][transformation] = \
[im.replace('.jpg', '') for im in list_ids_transformation]
list_ids += self.list_ids_per_view_transformation[view][transformation]
self.list_ids = np.unique(list_ids).tolist()
self.list_ids.sort()
def _filter_and_format_data(self):
"""
Clean the data before it can be split between the rows
"""
cols_data = self.id_vars + self.demographic_vars
if self.image_quality_col is not None:
cols_data.append(self.dict_image_quality_col[self.organ])
data = pd.read_csv(self.path_data + 'data-features_instances.csv', usecols=cols_data)
data.rename(columns={self.dict_image_quality_col[self.organ]: 'Data_quality'}, inplace=True)
for col_name in self.id_vars:
data[col_name] = data[col_name].astype(str)
data.set_index('id', drop=False, inplace=True)
if self.image_quality_col is not None:
            # drop samples whose image-quality field is missing (a `!= np.nan` comparison is always True)
            data = data[data['Data_quality'].notna()]
data.drop('Data_quality', axis=1, inplace=True)
# get rid of samples with NAs
data.dropna(inplace=True)
# list the samples' ids for which images are available
data = data.loc[self.list_ids]
self.data = data
def _split_data(self):
# Generate the data for each outer_fold
for i, outer_fold in enumerate(self.outer_folds):
of_val = outer_fold
of_test = str((int(outer_fold) + 1) % len(self.outer_folds))
DATA = {
'train': self.data[~self.data['outer_fold'].isin([of_val, of_test])],
'val': self.data[self.data['outer_fold'] == of_val],
'test': self.data[self.data['outer_fold'] == of_test]
}
# Generate the data for the different views and transformations
for view in self.views:
for transformation in self.dict_organsviews_to_transformations[self.organ + '_' + view]:
print('Splitting data for view ' + view + ', and transformation ' + transformation)
DF = {}
for fold in self.folds:
idx = DATA[fold]['id'].isin(self.list_ids_per_view_transformation[view][transformation]).values
DF[fold] = DATA[fold].iloc[idx, :]
# compute values for scaling of variables
normalizing_values = {}
for var in self.variables_to_normalize:
var_mean = DF['train'][var].mean()
if len(DF['train'][var].unique()) < 2:
print('Variable ' + var + ' has a single value in fold ' + outer_fold +
'. Using 1 as std for normalization.')
var_std = 1
else:
var_std = DF['train'][var].std()
normalizing_values[var] = {'mean': var_mean, 'std': var_std}
# normalize the variables
for fold in self.folds:
for var in self.variables_to_normalize:
DF[fold][var + '_raw'] = DF[fold][var]
DF[fold][var] = (DF[fold][var] - normalizing_values[var]['mean']) \
/ normalizing_values[var]['std']
# report issue if NAs were detected (most likely comes from a sample whose id did not match)
n_mismatching_samples = DF[fold].isna().sum().max()
if n_mismatching_samples > 0:
print(DF[fold][DF[fold].isna().any(axis=1)])
print('/!\\ WARNING! ' + str(n_mismatching_samples) + ' ' + fold + ' images ids out of ' +
str(len(DF[fold].index)) + ' did not match the dataframe!')
# save the data
DF[fold].to_csv(self.path_data + 'data-features_' + self.organ + '_' + view + '_' +
transformation + '_' + self.target + '_' + fold + '_' + outer_fold + '.csv',
index=False)
print('For outer_fold ' + outer_fold + ', the ' + fold + ' fold has a sample size of ' +
str(len(DF[fold].index)))
def generate_folds(self):
self._get_list_ids()
self._filter_and_format_data()
self._split_data()
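    # Example usage (hedged sketch):
    #   pf = PreprocessingFolds(target='Age', organ='Heart', regenerate_data=True)
    #   pf.generate_folds()  # writes data-features_Heart_<view>_<transformation>_Age_<fold>_<outer_fold>.csv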
class PreprocessingSurvival(Basics):
"""
Preprocesses the main dataframe for survival purposes.
Mirrors the PreprocessingMain class, but computes Death time and FollowTime for the future survival analysis
"""
def __init__(self):
Basics.__init__(self)
self.data_raw = None
self.data_features = None
self.data_features_eids = None
self.survival_vars = ['FollowUpTime', 'Death']
def _preprocessing(self):
usecols = ['eid', '40000-0.0', '34-0.0', '52-0.0', '53-0.0', '53-1.0', '53-2.0', '53-3.0']
self.data_raw = pd.read_csv('/n/groups/patel/uk_biobank/project_52887_41230/ukb41230.csv', usecols=usecols)
dict_UKB_fields_to_names = {'40000-0.0': 'FollowUpDate', '34-0.0': 'Year_of_birth', '52-0.0': 'Month_of_birth',
'53-0.0': 'Date_attended_center_0', '53-1.0': 'Date_attended_center_1',
'53-2.0': 'Date_attended_center_2', '53-3.0': 'Date_attended_center_3'}
self.data_raw.rename(columns=dict_UKB_fields_to_names, inplace=True)
self.data_raw['eid'] = self.data_raw['eid'].astype(str)
self.data_raw.set_index('eid', drop=False, inplace=True)
self.data_raw.index.name = 'column_names'
# Format survival data
self.data_raw['Death'] = ~self.data_raw['FollowUpDate'].isna()
self.data_raw['FollowUpDate'][self.data_raw['FollowUpDate'].isna()] = '2020-04-27'
self.data_raw['FollowUpDate'] = self.data_raw['FollowUpDate'].apply(
lambda x: pd.NaT if pd.isna(x) else datetime.strptime(x, '%Y-%m-%d'))
assert ('FollowUpDate.1' not in self.data_raw.columns)
def _add_physicalactivity_instances(self):
data_pa = pd.read_csv(
'/n/groups/patel/Alan/Aging/TimeSeries/series/PhysicalActivity/90001/features/PA_visit_date.csv')
data_pa['eid'] = data_pa['eid'].astype(str)
data_pa.set_index('eid', drop=False, inplace=True)
data_pa.index.name = 'column_names'
self.data_raw = self.data_raw.merge(data_pa, on=['eid'], how='outer')
self.data_raw.set_index('eid', drop=False, inplace=True)
def _compute_age(self):
# Recompute age with greater precision by leveraging the month of birth
self.data_raw.dropna(subset=['Year_of_birth'], inplace=True)
self.data_raw['Year_of_birth'] = self.data_raw['Year_of_birth'].astype(int)
self.data_raw['Month_of_birth'] = self.data_raw['Month_of_birth'].astype(int)
self.data_raw['Date_of_birth'] = self.data_raw.apply(
lambda row: datetime(row.Year_of_birth, row.Month_of_birth, 15), axis=1)
for i in self.instances:
self.data_raw['Date_attended_center_' + i] = self.data_raw['Date_attended_center_' + i].apply(
lambda x: pd.NaT if pd.isna(x) else datetime.strptime(x, '%Y-%m-%d'))
self.data_raw['Age_' + i] = self.data_raw['Date_attended_center_' + i] - self.data_raw['Date_of_birth']
self.data_raw['Age_' + i] = self.data_raw['Age_' + i].dt.days / 365.25
self.data_raw['FollowUpTime_' + i] = self.data_raw['FollowUpDate'] - self.data_raw[
'Date_attended_center_' + i]
self.data_raw['FollowUpTime_' + i] = self.data_raw['FollowUpTime_' + i].dt.days / 365.25
self.data_raw.drop(['Date_attended_center_' + i], axis=1, inplace=True)
self.data_raw.drop(['Year_of_birth', 'Month_of_birth', 'Date_of_birth', 'FollowUpDate'], axis=1, inplace=True)
self.data_raw.dropna(how='all', subset=['Age_0', 'Age_1', 'Age_1.5', 'Age_1.51', 'Age_1.52', 'Age_1.53',
'Age_1.54', 'Age_2', 'Age_3'], inplace=True)
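        # Hedged worked example of the computations above (dates are made up): for a participant born in June 1950
        # (Date_of_birth = 1950-06-15) who attended instance 2 on 2015-06-15, with follow-up ending on 2020-04-27:
        #     Age_2          = (2015-06-15 - 1950-06-15).days / 365.25 ≈ 65.0 years
        #     FollowUpTime_2 = (2020-04-27 - 2015-06-15).days / 365.25 ≈ 4.9 years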
def _concatenate_instances(self):
self.data_features = None
for i in self.instances:
print('Preparing the samples for instance ' + i)
df_i = self.data_raw.dropna(subset=['Age_' + i])
print(str(len(df_i.index)) + ' samples found in instance ' + i)
dict_names = {}
features = ['Age', 'FollowUpTime']
for feature in features:
dict_names[feature + '_' + i] = feature
self.dict_names = dict_names
df_i.rename(columns=dict_names, inplace=True)
df_i['instance'] = i
df_i['id'] = df_i['eid'] + '_' + df_i['instance']
df_i = df_i[['id', 'eid', 'instance'] + self.survival_vars]
if self.data_features is None:
self.data_features = df_i
else:
self.data_features = self.data_features.append(df_i)
print('The size of the full concatenated dataframe is now ' + str(len(self.data_features.index)))
# Add * instance for eids
        survival_eids = self.data_features[self.data_features['instance'] == '0'].copy()
survival_eids['instance'] = '*'
survival_eids['id'] = survival_eids['eid'] + '_' + survival_eids['instance']
self.data_features = self.data_features.append(survival_eids)
def generate_data(self):
# Formatting
self._preprocessing()
self._add_physicalactivity_instances()
self._compute_age()
self._concatenate_instances()
# save data
self.data_features.to_csv('../data/data_survival.csv', index=False)
class MyImageDataGenerator(Basics, Sequence, ImageDataGenerator):
"""
    Helper class: custom data generator for images.
    It handles several custom features such as:
    - providing batches that contain not only the images, but also the corresponding scalar data (e.g. demographics)
    - performing random shuffling while making sure that no leftover data (the remainder modulo the batch size)
      is left unused
    - handling paired data for paired organs (e.g. left/right eyes)
"""
def __init__(self, target=None, organ=None, view=None, data_features=None, n_samples_per_subepoch=None,
batch_size=None, training_mode=None, side_predictors=None, dir_images=None, images_width=None,
images_height=None, data_augmentation=False, data_augmentation_factor=None, seed=None):
# Parameters
Basics.__init__(self)
self.target = target
if target in self.targets_regression:
self.labels = data_features[target]
else:
self.labels = data_features[target + '_raw']
self.organ = organ
self.view = view
self.training_mode = training_mode
self.data_features = data_features
self.list_ids = data_features.index.values
self.batch_size = batch_size
        # for paired organs, use half as many ids per batch (two images per id), and add organ_side as a side predictor
if organ + '_' + view in self.left_right_organs_views:
self.data_features['organ_side'] = np.nan
self.n_ids_batch = batch_size // 2
else:
self.n_ids_batch = batch_size
        if self.training_mode and (n_samples_per_subepoch is not None):  # during training, one Keras epoch covers n_samples_per_subepoch samples (a subepoch)
self.steps = math.ceil(n_samples_per_subepoch / batch_size)
else: # during prediction and other tasks, an epoch is defined as all the samples being seen once and only once
self.steps = math.ceil(len(self.list_ids) / self.n_ids_batch)
# learning_rate_patience
if n_samples_per_subepoch is not None:
self.n_subepochs_per_epoch = math.ceil(len(self.data_features.index) / n_samples_per_subepoch)
# initiate the indices and shuffle the ids
self.shuffle = training_mode # Only shuffle if the model is being trained. Otherwise no need.
self.indices = np.arange(len(self.list_ids))
        self.idx_end = 0  # Keep track of the last index to permute the indices accordingly at the end of the epoch.
if self.shuffle:
np.random.shuffle(self.indices)
# Input for side NN and CNN
self.side_predictors = side_predictors
self.dir_images = dir_images
self.images_width = images_width
self.images_height = images_height
# Data augmentation
self.data_augmentation = data_augmentation
self.data_augmentation_factor = data_augmentation_factor
self.seed = seed
# Parameters for data augmentation: (rotation range, width shift range, height shift range, zoom range)
self.augmentation_parameters = \
pd.DataFrame(index=['Brain_MRI', 'Eyes_Fundus', 'Eyes_OCT', 'Arterial_Carotids', 'Heart_MRI',
'Abdomen_Liver', 'Abdomen_Pancreas', 'Musculoskeletal_Spine', 'Musculoskeletal_Hips',
'Musculoskeletal_Knees', 'Musculoskeletal_FullBody', 'PhysicalActivity_FullWeek',
'PhysicalActivity_Walking'],
columns=['rotation', 'width_shift', 'height_shift', 'zoom'])
self.augmentation_parameters.loc['Brain_MRI', :] = [10, 0.05, 0.1, 0.0]
self.augmentation_parameters.loc['Eyes_Fundus', :] = [20, 0.02, 0.02, 0]
self.augmentation_parameters.loc['Eyes_OCT', :] = [30, 0.1, 0.2, 0]
self.augmentation_parameters.loc[['Arterial_Carotids'], :] = [0, 0.2, 0.0, 0.0]
self.augmentation_parameters.loc[['Heart_MRI', 'Abdomen_Liver', 'Abdomen_Pancreas',
'Musculoskeletal_Spine'], :] = [10, 0.1, 0.1, 0.0]
self.augmentation_parameters.loc[['Musculoskeletal_Hips', 'Musculoskeletal_Knees'], :] = [10, 0.1, 0.1, 0.1]
self.augmentation_parameters.loc[['Musculoskeletal_FullBody'], :] = [10, 0.05, 0.02, 0.0]
self.augmentation_parameters.loc[['PhysicalActivity_FullWeek'], :] = [0, 0, 0, 0.0]
organ_view = organ + '_' + view
ImageDataGenerator.__init__(self, rescale=1. / 255.,
rotation_range=self.augmentation_parameters.loc[organ_view, 'rotation'],
width_shift_range=self.augmentation_parameters.loc[organ_view, 'width_shift'],
height_shift_range=self.augmentation_parameters.loc[organ_view, 'height_shift'],
zoom_range=self.augmentation_parameters.loc[organ_view, 'zoom'])
def __len__(self):
return self.steps
def on_epoch_end(self):
_ = gc.collect()
self.indices = np.concatenate([self.indices[self.idx_end:], self.indices[:self.idx_end]])
def _generate_image(self, path_image):
img = load_img(path_image, target_size=(self.images_width, self.images_height), color_mode='rgb')
Xi = img_to_array(img)
if hasattr(img, 'close'):
img.close()
if self.data_augmentation:
params = self.get_random_transform(Xi.shape)
Xi = self.apply_transform(Xi, params)
Xi = self.standardize(Xi)
return Xi
def _data_generation(self, list_ids_batch):
# initialize empty matrices
n_samples_batch = min(len(list_ids_batch), self.batch_size)
X = np.empty((n_samples_batch, self.images_width, self.images_height, 3)) * np.nan
x = np.empty((n_samples_batch, len(self.side_predictors))) * np.nan
y = np.empty((n_samples_batch, 1)) * np.nan
# fill the matrices sample by sample
for i, ID in enumerate(list_ids_batch):
y[i] = self.labels[ID]
x[i] = self.data_features.loc[ID, self.side_predictors]
if self.organ + '_' + self.view in self.left_right_organs_views:
if i % 2 == 0:
path = self.dir_images + 'right/'
x[i][-1] = 0
else:
path = self.dir_images + 'left/'
x[i][-1] = 1
if not os.path.exists(path + ID + '.jpg'):
path = path.replace('/right/', '/left/') if i % 2 == 0 else path.replace('/left/', '/right/')
x[i][-1] = 1 - x[i][-1]
else:
path = self.dir_images
X[i, :, :, :] = self._generate_image(path_image=path + ID + '.jpg')
return [X, x], y
def __getitem__(self, index):
# Select the indices
idx_start = (index * self.n_ids_batch) % len(self.list_ids)
idx_end = (((index + 1) * self.n_ids_batch) - 1) % len(self.list_ids) + 1
if idx_start > idx_end:
# If this happens outside of training, that is a mistake
if not self.training_mode:
print('\nERROR: Outside of training, every sample should only be predicted once!')
sys.exit(1)
# Select part of the indices from the end of the epoch
indices = self.indices[idx_start:]
# Generate a new set of indices
# print('\nThe end of the data was reached within this batch, looping.')
if self.shuffle:
np.random.shuffle(self.indices)
# Complete the batch with samples from the new indices
indices = np.concatenate([indices, self.indices[:idx_end]])
else:
indices = self.indices[idx_start: idx_end]
            if (idx_end == len(self.list_ids)) and self.shuffle:
# print('\nThe end of the data was reached. Shuffling for the next epoch.')
np.random.shuffle(self.indices)
        # Keep track of the last index for the end of the subepoch
self.idx_end = idx_end
# Select the corresponding ids
list_ids_batch = [self.list_ids[i] for i in indices]
# For paired organs, two images (left, right eyes) are selected for each id.
if self.organ + '_' + self.view in self.left_right_organs_views:
list_ids_batch = [ID for ID in list_ids_batch for _ in ('right', 'left')]
return self._data_generation(list_ids_batch)
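# Hedged worked example of the index arithmetic in MyImageDataGenerator.__getitem__ (illustrative numbers):
# with len(list_ids) = 10 and n_ids_batch = 4, batch index 2 gives idx_start = (2 * 4) % 10 = 8 and
# idx_end = ((3 * 4) - 1) % 10 + 1 = 2, so idx_start > idx_end and the batch wraps around: it takes indices [8:],
# reshuffles the permutation (when training), and completes the batch with the first 2 indices of the new permutation.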
class MyCSVLogger(Callback):
"""
    Custom CSV Logger callback class for Keras training: appends to the existing log file if one is found. Allows
    keeping track of training across several jobs.
"""
def __init__(self, filename, separator=',', append=False):
self.sep = separator
self.filename = filename
self.append = append
self.writer = None
self.keys = None
self.append_header = True
self.csv_file = None
if six.PY2:
self.file_flags = 'b'
self._open_args = {}
else:
self.file_flags = ''
self._open_args = {'newline': '\n'}
Callback.__init__(self)
def on_train_begin(self, logs=None):
if self.append:
if file_io.file_exists(self.filename):
with open(self.filename, 'r' + self.file_flags) as f:
self.append_header = not bool(len(f.readline()))
mode = 'a'
else:
mode = 'w'
self.csv_file = io.open(self.filename, mode + self.file_flags, **self._open_args)
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
def handle_value(k):
is_zero_dim_ndarray = isinstance(k, np.ndarray) and k.ndim == 0
if isinstance(k, six.string_types):
return k
elif isinstance(k, collections_abc.Iterable) and not is_zero_dim_ndarray:
return '"[%s]"' % (', '.join(map(str, k)))
else:
return k
if self.keys is None:
self.keys = sorted(logs.keys())
if self.model.stop_training:
# We set NA so that csv parsers do not fail for this last epoch.
logs = dict([(k, logs[k]) if k in logs else (k, 'NA') for k in self.keys])
if not self.writer:
class CustomDialect(csv.excel):
delimiter = self.sep
fieldnames = ['epoch', 'learning_rate'] + self.keys
if six.PY2:
fieldnames = [unicode(x) for x in fieldnames]
self.writer = csv.DictWriter(
self.csv_file,
fieldnames=fieldnames,
dialect=CustomDialect)
if self.append_header:
self.writer.writeheader()
        # read the current learning rate from the optimizer variable (the builtin eval() cannot read a tf variable);
        # this assumes tensorflow is imported as tf, as it is elsewhere in this module
        row_dict = collections.OrderedDict(
            {'epoch': epoch, 'learning_rate': tf.keras.backend.get_value(self.model.optimizer.lr)})
row_dict.update((key, handle_value(logs[key])) for key in self.keys)
self.writer.writerow(row_dict)
self.csv_file.flush()
def on_train_end(self, logs=None):
self.csv_file.close()
self.writer = None
class MyModelCheckpoint(ModelCheckpoint):
"""
    Custom checkpoint callback class for Keras training. Handles a baseline performance: weights are only saved once
    the monitored metric improves on the baseline.
"""
def __init__(self, filepath, monitor='val_loss', baseline=-np.Inf, verbose=0, save_best_only=False,
save_weights_only=False, mode='auto', save_freq='epoch'):
# Parameters
ModelCheckpoint.__init__(self, filepath, monitor=monitor, verbose=verbose, save_best_only=save_best_only,
save_weights_only=save_weights_only, mode=mode, save_freq=save_freq)
if mode == 'min':
self.monitor_op = np.less
self.best = baseline
elif mode == 'max':
self.monitor_op = np.greater
self.best = baseline
else:
print('Error. mode for metric must be either min or max')
sys.exit(1)
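# Hedged example of how the baseline is used (values are illustrative): with monitor='val_R-Squared', mode='max' and
# baseline=0.5, self.best starts at 0.5, so the first checkpoint is only written once the validation R-Squared exceeds
# 0.5, which lets the Training class avoid overwriting previously trained weights with worse ones.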
class DeepLearning(Metrics):
"""
Core helper class to train models. Used to:
- build the data generators
- generate the CNN architectures
- load the weights
"""
def __init__(self, target=None, organ=None, view=None, transformation=None, architecture=None, n_fc_layers=None,
n_fc_nodes=None, optimizer=None, learning_rate=None, weight_decay=None, dropout_rate=None,
data_augmentation_factor=None, debug_mode=False):
# Initialization
Metrics.__init__(self)
tf.random.set_seed(self.seed)
# Model's version
self.target = target
self.organ = organ
self.view = view
self.transformation = transformation
self.architecture = architecture
self.n_fc_layers = int(n_fc_layers)
self.n_fc_nodes = int(n_fc_nodes)
self.optimizer = optimizer
self.learning_rate = float(learning_rate)
self.weight_decay = float(weight_decay)
self.dropout_rate = float(dropout_rate)
self.data_augmentation_factor = float(data_augmentation_factor)
self.outer_fold = None
self.version = target + '_' + organ + '_' + view + '_' + transformation + '_' + architecture + '_' + \
n_fc_layers + '_' + n_fc_nodes + '_' + optimizer + '_' + learning_rate + '_' + weight_decay + \
'_' + dropout_rate + '_' + data_augmentation_factor
# NNet's architecture and weights
self.side_predictors = self.dict_side_predictors[target]
if self.organ + '_' + self.view in self.left_right_organs_views:
self.side_predictors.append('organ_side')
self.dict_final_activations = {'regression': 'linear', 'binary': 'sigmoid', 'multiclass': 'softmax',
'saliency': 'linear'}
self.path_load_weights = None
self.keras_weights = None
# Generators
self.debug_mode = debug_mode
self.debug_fraction = 0.005
self.DATA_FEATURES = {}
self.mode = None
self.n_cpus = len(os.sched_getaffinity(0))
self.dir_images = '../images/' + organ + '/' + view + '/' + transformation + '/'
        # define a dictionary to fit the architecture's input size to the image sizes (take the min of height and width)
self.dict_organ_view_transformation_to_image_size = {
'Eyes_Fundus_Raw': (316, 316), # initial size (1388, 1388)
'Eyes_OCT_Raw': (312, 320), # initial size (500, 512)
'Musculoskeletal_Spine_Sagittal': (466, 211), # initial size (1513, 684)
'Musculoskeletal_Spine_Coronal': (315, 313), # initial size (724, 720)
'Musculoskeletal_Hips_MRI': (329, 303), # initial size (626, 680)
'Musculoskeletal_Knees_MRI': (347, 286) # initial size (851, 700)
}
self.dict_organ_view_transformation_to_image_size.update(
dict.fromkeys(['Brain_MRI_SagittalRaw', 'Brain_MRI_SagittalReference', 'Brain_MRI_CoronalRaw',
'Brain_MRI_CoronalReference', 'Brain_MRI_TransverseRaw', 'Brain_MRI_TransverseReference'],
(316, 316))) # initial size (88, 88)
self.dict_organ_view_transformation_to_image_size.update(
dict.fromkeys(['Arterial_Carotids_Mixed', 'Arterial_Carotids_LongAxis', 'Arterial_Carotids_CIMT120',
'Arterial_Carotids_CIMT150', 'Arterial_Carotids_ShortAxis'],
(337, 291))) # initial size (505, 436)
self.dict_organ_view_transformation_to_image_size.update(
dict.fromkeys(['Heart_MRI_2chambersRaw', 'Heart_MRI_2chambersContrast', 'Heart_MRI_3chambersRaw',
'Heart_MRI_3chambersContrast', 'Heart_MRI_4chambersRaw', 'Heart_MRI_4chambersContrast'],
(316, 316))) # initial size (200, 200)
self.dict_organ_view_transformation_to_image_size.update(
dict.fromkeys(['Abdomen_Liver_Raw', 'Abdomen_Liver_Contrast'], (288, 364))) # initial size (288, 364)
self.dict_organ_view_transformation_to_image_size.update(
dict.fromkeys(['Abdomen_Pancreas_Raw', 'Abdomen_Pancreas_Contrast'], (288, 350))) # initial size (288, 350)
self.dict_organ_view_transformation_to_image_size.update(
dict.fromkeys(['Musculoskeletal_FullBody_Figure', 'Musculoskeletal_FullBody_Skeleton',
'Musculoskeletal_FullBody_Flesh', 'Musculoskeletal_FullBody_Mixed'],
(541, 181))) # initial size (811, 272)
self.dict_organ_view_transformation_to_image_size.update(
dict.fromkeys(['PhysicalActivity_FullWeek_GramianAngularField1minDifference',
'PhysicalActivity_FullWeek_GramianAngularField1minSummation',
'PhysicalActivity_FullWeek_MarkovTransitionField1min',
'PhysicalActivity_FullWeek_RecurrencePlots1min'],
(316, 316))) # initial size (316, 316)
self.dict_architecture_to_image_size = {'MobileNet': (224, 224), 'MobileNetV2': (224, 224),
'NASNetMobile': (224, 224), 'NASNetLarge': (331, 331)}
if self.architecture in ['MobileNet', 'MobileNetV2', 'NASNetMobile', 'NASNetLarge']:
self.image_width, self.image_height = self.dict_architecture_to_image_size[architecture]
else:
self.image_width, self.image_height = \
self.dict_organ_view_transformation_to_image_size[organ + '_' + view + '_' + transformation]
# define dictionary of batch sizes to fit as many samples as the model's architecture allows
self.dict_batch_sizes = {
# Default, applies to all images with resized input ~100,000 pixels
'Default': {'VGG16': 32, 'VGG19': 32, 'DenseNet121': 16, 'DenseNet169': 16, 'DenseNet201': 16,
'Xception': 32, 'InceptionV3': 32, 'InceptionResNetV2': 8, 'ResNet50': 32, 'ResNet101': 16,
'ResNet152': 16, 'ResNet50V2': 32, 'ResNet101V2': 16, 'ResNet152V2': 16, 'ResNeXt50': 4,
'ResNeXt101': 8, 'EfficientNetB7': 4,
'MobileNet': 128, 'MobileNetV2': 64, 'NASNetMobile': 64, 'NASNetLarge': 4}}
# Define batch size
if organ + '_' + view in self.dict_batch_sizes.keys():
            self.batch_size = self.dict_batch_sizes[organ + '_' + view][architecture]
else:
self.batch_size = self.dict_batch_sizes['Default'][architecture]
        # double the batch size for the Tesla M40 GPUs, which have more memory
        if len(GPUtil.getGPUs()) > 0:  # make sure GPUs are available (not always the case, e.g. when debugging)
if GPUtil.getGPUs()[0].memoryTotal > 20000:
self.batch_size *= 2
        # Define the number of ids per batch (half as many for paired organs, since each id contributes a left and a right sample)
self.n_ids_batch = self.batch_size
if organ + '_' + view in self.left_right_organs_views:
self.n_ids_batch //= 2
# Define number of samples per subepoch
if debug_mode:
self.n_samples_per_subepoch = self.batch_size * 4
else:
self.n_samples_per_subepoch = 32768
if organ + '_' + view in self.left_right_organs_views:
self.n_samples_per_subepoch //= 2
# dict to decide which field is used to generate the ids when several targets share the same ids
self.dict_target_to_ids = dict.fromkeys(['Age', 'Sex'], 'Age')
# Note: R-Squared and F1-Score are not available, because their batch based values are misleading.
# For some reason, Sensitivity and Specificity are not available either. Might implement later.
self.dict_losses_K = {'MSE': MeanSquaredError(name='MSE'),
'Binary-Crossentropy': BinaryCrossentropy(name='Binary-Crossentropy')}
self.dict_metrics_K = {'R-Squared': RSquare(name='R-Squared', y_shape=(1,)),
'RMSE': RootMeanSquaredError(name='RMSE'),
'F1-Score': F1Score(name='F1-Score', num_classes=1, dtype=tf.float32),
'ROC-AUC': AUC(curve='ROC', name='ROC-AUC'),
'PR-AUC': AUC(curve='PR', name='PR-AUC'),
'Binary-Accuracy': BinaryAccuracy(name='Binary-Accuracy'),
'Precision': Precision(name='Precision'),
'Recall': Recall(name='Recall'),
'True-Positives': TruePositives(name='True-Positives'),
'False-Positives': FalsePositives(name='False-Positives'),
'False-Negatives': FalseNegatives(name='False-Negatives'),
'True-Negatives': TrueNegatives(name='True-Negatives')}
# Metrics
self.prediction_type = self.dict_prediction_types[target]
self.loss_name = self.dict_losses_names[self.prediction_type]
self.loss_function = self.dict_losses_K[self.loss_name]
self.main_metric_name = self.dict_main_metrics_names_K[target]
self.main_metric_mode = self.main_metrics_modes[self.main_metric_name]
self.main_metric = self.dict_metrics_K[self.main_metric_name]
self.metrics_names = [self.main_metric_name]
self.metrics = [self.dict_metrics_K[metric_name] for metric_name in self.metrics_names]
# Optimizers
self.optimizers = {'Adam': Adam, 'RMSprop': RMSprop, 'Adadelta': Adadelta}
# Model
self.model = None
@staticmethod
def _append_ext(fn):
return fn + ".jpg"
def _load_data_features(self):
for fold in self.folds:
self.DATA_FEATURES[fold] = pd.read_csv(
self.path_data + 'data-features_' + self.organ + '_' + self.view + '_' + self.transformation + '_' +
self.dict_target_to_ids[self.target] + '_' + fold + '_' + self.outer_fold + '.csv')
for col_name in self.id_vars:
self.DATA_FEATURES[fold][col_name] = self.DATA_FEATURES[fold][col_name].astype(str)
self.DATA_FEATURES[fold].set_index('id', drop=False, inplace=True)
def _take_subset_to_debug(self):
for fold in self.folds:
# use +1 or +2 to test the leftovers pipeline
leftovers_extra = {'train': 0, 'val': 1, 'test': 2}
n_batches = 2
n_limit_fold = leftovers_extra[fold] + self.batch_size * n_batches
self.DATA_FEATURES[fold] = self.DATA_FEATURES[fold].iloc[:n_limit_fold, :]
def _generate_generators(self, DATA_FEATURES):
GENERATORS = {}
for fold in self.folds:
# do not generate a generator if there are no samples (can happen for leftovers generators)
if fold not in DATA_FEATURES.keys():
continue
# parameters
training_mode = True if self.mode == 'model_training' else False
if (fold == 'train') & (self.mode == 'model_training') & \
(self.organ + '_' + self.view not in self.organsviews_not_to_augment):
data_augmentation = True
else:
data_augmentation = False
# define batch size for testing: data is split between a part that fits in batches, and leftovers
if self.mode == 'model_testing':
if self.organ + '_' + self.view in self.left_right_organs_views:
n_samples = len(DATA_FEATURES[fold].index) * 2
else:
n_samples = len(DATA_FEATURES[fold].index)
batch_size_fold = min(self.batch_size, n_samples)
else:
batch_size_fold = self.batch_size
if (fold == 'train') & (self.mode == 'model_training'):
n_samples_per_subepoch = self.n_samples_per_subepoch
else:
n_samples_per_subepoch = None
# generator
GENERATORS[fold] = \
MyImageDataGenerator(target=self.target, organ=self.organ, view=self.view,
data_features=DATA_FEATURES[fold], n_samples_per_subepoch=n_samples_per_subepoch,
batch_size=batch_size_fold, training_mode=training_mode,
side_predictors=self.side_predictors, dir_images=self.dir_images,
images_width=self.image_width, images_height=self.image_height,
data_augmentation=data_augmentation,
data_augmentation_factor=self.data_augmentation_factor, seed=self.seed)
return GENERATORS
def _generate_class_weights(self):
if self.dict_prediction_types[self.target] == 'binary':
self.class_weights = {}
counts = self.DATA_FEATURES['train'][self.target + '_raw'].value_counts()
n_total = counts.sum()
            # weight the samples of each class inversely proportionally to the class prevalence, keeping weights around order of magnitude 1
for i in counts.index.values:
self.class_weights[i] = n_total / (counts.loc[i] * len(counts.index))
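        # Hedged worked example (made-up counts): with 900 negative and 100 positive training samples,
        # n_total = 1000 and len(counts.index) = 2, so the weights are
        #     class 0: 1000 / (900 * 2) ≈ 0.56      class 1: 1000 / (100 * 2) = 5.0
        # i.e. the rarer class is up-weighted inversely to its prevalence.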
def _generate_cnn(self):
# define the arguments
        # use the noisy-student initial weights for EfficientNetB7 (they perform better than the imagenet weights)
if (self.architecture == 'EfficientNetB7') & (self.keras_weights == 'imagenet'):
w = 'noisy-student'
else:
w = self.keras_weights
kwargs = {"include_top": False, "weights": w, "input_shape": (self.image_width, self.image_height, 3)}
if self.architecture in ['ResNet50', 'ResNet101', 'ResNet152', 'ResNet50V2', 'ResNet101V2', 'ResNet152V2',
'ResNeXt50', 'ResNeXt101']:
import tensorflow.keras
kwargs.update(
{"backend": tensorflow.keras.backend, "layers": tensorflow.keras.layers,
"models": tensorflow.keras.models, "utils": tensorflow.keras.utils})
# load the architecture builder
if self.architecture == 'VGG16':
from tensorflow.keras.applications.vgg16 import VGG16 as ModelBuilder
elif self.architecture == 'VGG19':
from tensorflow.keras.applications.vgg19 import VGG19 as ModelBuilder
elif self.architecture == 'DenseNet121':
from tensorflow.keras.applications.densenet import DenseNet121 as ModelBuilder
elif self.architecture == 'DenseNet169':
from tensorflow.keras.applications.densenet import DenseNet169 as ModelBuilder
elif self.architecture == 'DenseNet201':
from tensorflow.keras.applications.densenet import DenseNet201 as ModelBuilder
elif self.architecture == 'Xception':
from tensorflow.keras.applications.xception import Xception as ModelBuilder
elif self.architecture == 'InceptionV3':
from tensorflow.keras.applications.inception_v3 import InceptionV3 as ModelBuilder
elif self.architecture == 'InceptionResNetV2':
from tensorflow.keras.applications.inception_resnet_v2 import InceptionResNetV2 as ModelBuilder
elif self.architecture == 'ResNet50':
from keras_applications.resnet import ResNet50 as ModelBuilder
elif self.architecture == 'ResNet101':
from keras_applications.resnet import ResNet101 as ModelBuilder
elif self.architecture == 'ResNet152':
from keras_applications.resnet import ResNet152 as ModelBuilder
elif self.architecture == 'ResNet50V2':
from keras_applications.resnet_v2 import ResNet50V2 as ModelBuilder
elif self.architecture == 'ResNet101V2':
from keras_applications.resnet_v2 import ResNet101V2 as ModelBuilder
elif self.architecture == 'ResNet152V2':
from keras_applications.resnet_v2 import ResNet152V2 as ModelBuilder
elif self.architecture == 'ResNeXt50':
from keras_applications.resnext import ResNeXt50 as ModelBuilder
elif self.architecture == 'ResNeXt101':
from keras_applications.resnext import ResNeXt101 as ModelBuilder
elif self.architecture == 'EfficientNetB7':
from efficientnet.tfkeras import EfficientNetB7 as ModelBuilder
        # The following models have a fixed input size requirement
elif self.architecture == 'NASNetMobile':
from tensorflow.keras.applications.nasnet import NASNetMobile as ModelBuilder
elif self.architecture == 'NASNetLarge':
from tensorflow.keras.applications.nasnet import NASNetLarge as ModelBuilder
elif self.architecture == 'MobileNet':
from tensorflow.keras.applications.mobilenet import MobileNet as ModelBuilder
elif self.architecture == 'MobileNetV2':
from tensorflow.keras.applications.mobilenet_v2 import MobileNetV2 as ModelBuilder
else:
print('Architecture does not exist.')
sys.exit(1)
# build the model's base
cnn = ModelBuilder(**kwargs)
x = cnn.output
# complete the model's base
if self.architecture in ['VGG16', 'VGG19']:
x = Flatten()(x)
x = Dense(4096, activation='relu', kernel_regularizer=regularizers.l2(self.weight_decay))(x)
x = Dropout(self.dropout_rate)(x)
x = Dense(4096, activation='relu', kernel_regularizer=regularizers.l2(self.weight_decay))(x)
x = Dropout(self.dropout_rate)(x)
else:
x = GlobalAveragePooling2D()(x)
if self.architecture == 'EfficientNetB7':
x = Dropout(self.dropout_rate)(x)
cnn_output = x
return cnn.input, cnn_output
def _generate_side_nn(self):
side_nn = Sequential()
side_nn.add(Dense(16, input_dim=len(self.side_predictors), activation="relu",
kernel_regularizer=regularizers.l2(self.weight_decay)))
return side_nn.input, side_nn.output
def _complete_architecture(self, cnn_input, cnn_output, side_nn_input, side_nn_output):
x = concatenate([cnn_output, side_nn_output])
x = Dropout(self.dropout_rate)(x)
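        # The layer widths below shrink by a factor of 4 at each fully-connected layer; as a hedged worked example,
        # n_fc_nodes = 64 and n_fc_layers = 3 give widths [1024, 256, 64] (64 * 4**2, 64 * 4**1, 64 * 4**0).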
for n in [int(self.n_fc_nodes * (2 ** (2 * (self.n_fc_layers - 1 - i)))) for i in range(self.n_fc_layers)]:
x = Dense(n, activation='relu', kernel_regularizer=regularizers.l2(self.weight_decay))(x)
            # scale the dropout proportionally to the number of nodes in the layer; no dropout for the smallest layers (n <= 16)
if n > 16:
x = Dropout(self.dropout_rate * n / 1024)(x)
predictions = Dense(1, activation=self.dict_final_activations[self.prediction_type],
kernel_regularizer=regularizers.l2(self.weight_decay))(x)
self.model = Model(inputs=[cnn_input, side_nn_input], outputs=predictions)
def _generate_architecture(self):
cnn_input, cnn_output = self._generate_cnn()
side_nn_input, side_nn_output = self._generate_side_nn()
self._complete_architecture(cnn_input=cnn_input, cnn_output=cnn_output, side_nn_input=side_nn_input,
side_nn_output=side_nn_output)
def _load_model_weights(self):
try:
self.model.load_weights(self.path_load_weights)
except (FileNotFoundError, TypeError):
# load backup weights if the main weights are corrupted
try:
self.model.load_weights(self.path_load_weights.replace('model-weights', 'backup-model-weights'))
except FileNotFoundError:
                print('Error. No weights file was found; imagenet weights should have been used instead. Bug somewhere.')
sys.exit(1)
@staticmethod
def clean_exit():
# exit
print('\nDone.\n')
print('Killing JOB PID with kill...')
os.system('touch ../eo/' + os.environ['SLURM_JOBID'])
os.system('kill ' + str(os.getpid()))
time.sleep(60)
print('Escalating to kill JOB PID with kill -9...')
os.system('kill -9 ' + str(os.getpid()))
time.sleep(60)
print('Escalating to kill JOB ID')
os.system('scancel ' + os.environ['SLURM_JOBID'])
time.sleep(60)
print('Everything failed to kill the job. Hanging there until hitting walltime...')
class Training(DeepLearning):
"""
Class to train CNN models:
- Generates the architecture
    - Loads the most recent best weights so that a model can be trained across several jobs
- Generates the callbacks
- Compiles the model
- Trains the model
"""
def __init__(self, target=None, organ=None, view=None, transformation=None, architecture=None, n_fc_layers=None,
n_fc_nodes=None, optimizer=None, learning_rate=None, weight_decay=None, dropout_rate=None,
data_augmentation_factor=None, outer_fold=None, debug_mode=False, transfer_learning=None,
continue_training=True, display_full_metrics=True):
# parameters
DeepLearning.__init__(self, target, organ, view, transformation, architecture, n_fc_layers, n_fc_nodes,
optimizer, learning_rate, weight_decay, dropout_rate, data_augmentation_factor,
debug_mode)
self.outer_fold = outer_fold
self.version = self.version + '_' + str(outer_fold)
# NNet's architecture's weights
self.continue_training = continue_training
self.transfer_learning = transfer_learning
self.list_parameters_to_match = ['organ', 'transformation', 'view']
# dict to decide in which order targets should be used when trying to transfer weight from a similar model
self.dict_alternative_targets_for_transfer_learning = {'Age': ['Age', 'Sex'], 'Sex': ['Sex', 'Age']}
# Generators
self.folds = ['train', 'val']
self.mode = 'model_training'
self.class_weights = None
self.GENERATORS = None
# Metrics
self.baseline_performance = None
if display_full_metrics:
self.metrics_names = self.dict_metrics_names_K[self.prediction_type]
# Model
self.path_load_weights = self.path_data + 'model-weights_' + self.version + '.h5'
if debug_mode:
self.path_save_weights = self.path_data + 'model-weights-debug.h5'
else:
self.path_save_weights = self.path_data + 'model-weights_' + self.version + '.h5'
self.n_epochs_max = 100000
self.callbacks = None
# Load and preprocess the data, build the generators
def data_preprocessing(self):
self._load_data_features()
if self.debug_mode:
self._take_subset_to_debug()
self._generate_class_weights()
self.GENERATORS = self._generate_generators(self.DATA_FEATURES)
# Determine which weights to load, if any.
def _weights_for_transfer_learning(self):
print('Looking for models to transfer weights from...')
# define parameters
parameters = self._version_to_parameters(self.version)
# continue training if possible
if self.continue_training and os.path.exists(self.path_load_weights):
print('Loading the weights from the model\'s previous training iteration.')
return
        # Initialize the weights using the weights from other successful hyperparameter combinations
        if self.transfer_learning == 'hyperparameters':
            # Check if the same model with other hyperparameters has already been trained. Pick the best one for transfer.
params = self.version.split('_')
            # names_model_parameters is assumed to be the list of model parameter names defined on the Basics parent class
            params_tl_idx = \
                [i for i in range(len(self.names_model_parameters))
                 if any(self.names_model_parameters[i] == p for p in
                        ['optimizer', 'learning_rate', 'weight_decay', 'dropout_rate', 'data_augmentation_factor'])]
for idx in params_tl_idx:
params[idx] = '*'
versions = '../eo/MI02_' + '_'.join(params) + '.out'
files = glob.glob(versions)
if self.main_metric_mode == 'min':
best_perf = np.Inf
else:
best_perf = -np.Inf
for file in files:
hand = open(file, 'r')
# find best last performance
final_improvement_line = None
baseline_performance_line = None
for line in hand:
line = line.rstrip()
if re.search('Baseline validation ' + self.main_metric_name + ' = ', line):
baseline_performance_line = line
if re.search('val_' + self.main_metric_name + ' improved from', line):
final_improvement_line = line
hand.close()
if final_improvement_line is not None:
perf = float(final_improvement_line.split(' ')[7].replace(',', ''))
elif baseline_performance_line is not None:
perf = float(baseline_performance_line.split(' ')[-1])
else:
continue
# Keep track of the file with the best performance
if self.main_metric_mode == 'min':
update = perf < best_perf
else:
update = perf > best_perf
if update:
best_perf = perf
self.path_load_weights = \
file.replace('../eo/', self.path_data).replace('MI02', 'model-weights').replace('.out', '.h5')
if best_perf not in [-np.Inf, np.Inf]:
                print('Transferring the weights from: ' + self.path_load_weights + ', with ' + self.main_metric_name +
' = ' + str(best_perf))
return
# Initialize the weights based on models trained on different datasets, ranked by similarity
if self.transfer_learning == 'datasets':
while True:
# print('Matching models for the following criterias:');
# print(['architecture', 'target'] + list_parameters_to_match)
# start by looking for models trained on the same target, then move to other targets
for target_to_load in self.dict_alternative_targets_for_transfer_learning[parameters['target']]:
# print('Target used: ' + target_to_load)
parameters_to_match = parameters.copy()
parameters_to_match['target'] = target_to_load
# load the ranked performances table to select the best performing model among the similar
# models available
path_performances_to_load = self.path_data + 'PERFORMANCES_ranked_' + \
parameters_to_match['target'] + '_' + 'val' + '.csv'
try:
Performances = pd.read_csv(path_performances_to_load)
Performances['organ'] = Performances['organ'].astype(str)
except FileNotFoundError:
# print("Could not load the file: " + path_performances_to_load)
break
# iteratively get rid of models that are not similar enough, based on the list
for parameter in ['architecture', 'target'] + self.list_parameters_to_match:
Performances = Performances[Performances[parameter] == parameters_to_match[parameter]]
# if at least one model is similar enough, load weights from the best of them
if len(Performances.index) != 0:
                        self.path_load_weights = self.path_data + 'model-weights_' + Performances['version'].iloc[0] + '.h5'
self.keras_weights = None
                        print('Transferring the weights from: ' + self.path_load_weights)
return
# if no similar model was found, try again after getting rid of the last selection criteria
if len(self.list_parameters_to_match) == 0:
print('No model found for transfer learning.')
break
self.list_parameters_to_match.pop()
# Otherwise use imagenet weights to initialize
print('Using imagenet weights.')
        # no weights file to load; the model will be initialized with the imagenet weights below
self.path_load_weights = None
self.keras_weights = 'imagenet'
def _compile_model(self):
# if learning rate was reduced with success according to logger, start with this reduced learning rate
if self.path_load_weights is not None:
path_logger = self.path_load_weights.replace('model-weights', 'logger').replace('.h5', '.csv')
else:
path_logger = self.path_data + 'logger_' + self.version + '.csv'
if os.path.exists(path_logger):
try:
logger = pd.read_csv(path_logger)
best_log = \
logger[logger['val_' + self.main_metric_name] == logger['val_' + self.main_metric_name].max()]
lr = best_log['learning_rate'].values[0]
except pd.errors.EmptyDataError:
os.remove(path_logger)
lr = self.learning_rate
else:
lr = self.learning_rate
self.model.compile(optimizer=self.optimizers[self.optimizer](lr=lr, clipnorm=1.0), loss=self.loss_function,
metrics=self.metrics)
def _compute_baseline_performance(self):
# calculate initial val_loss value
if self.continue_training:
idx_metric_name = ([self.loss_name] + self.metrics_names).index(self.main_metric_name)
baseline_perfs = self.model.evaluate(self.GENERATORS['val'], steps=self.GENERATORS['val'].steps)
self.baseline_performance = baseline_perfs[idx_metric_name]
elif self.main_metric_mode == 'min':
self.baseline_performance = np.Inf
else:
self.baseline_performance = -np.Inf
print('Baseline validation ' + self.main_metric_name + ' = ' + str(self.baseline_performance))
def _define_callbacks(self):
if self.debug_mode:
path_logger = self.path_data + 'logger-debug.csv'
append = False
else:
path_logger = self.path_data + 'logger_' + self.version + '.csv'
append = self.continue_training
csv_logger = MyCSVLogger(path_logger, separator=',', append=append)
model_checkpoint_backup = MyModelCheckpoint(self.path_save_weights.replace('model-weights',
'backup-model-weights'),
monitor='val_' + self.main_metric.name,
baseline=self.baseline_performance, verbose=1, save_best_only=True,
save_weights_only=True, mode=self.main_metric_mode,
save_freq='epoch')
model_checkpoint = MyModelCheckpoint(self.path_save_weights,
monitor='val_' + self.main_metric.name, baseline=self.baseline_performance,
verbose=1, save_best_only=True, save_weights_only=True,
mode=self.main_metric_mode, save_freq='epoch')
patience_reduce_lr = min(7, 3 * self.GENERATORS['train'].n_subepochs_per_epoch)
reduce_lr_on_plateau = ReduceLROnPlateau(monitor='loss', factor=0.5, patience=patience_reduce_lr, verbose=1,
mode='min', min_delta=0, cooldown=0, min_lr=0)
early_stopping = EarlyStopping(monitor='val_' + self.main_metric.name, min_delta=0, patience=15, verbose=0,
mode=self.main_metric_mode, baseline=self.baseline_performance,
restore_best_weights=True)
self.callbacks = [csv_logger, model_checkpoint_backup, model_checkpoint, early_stopping, reduce_lr_on_plateau]
def build_model(self):
self._weights_for_transfer_learning()
self._generate_architecture()
# Load weights if possible
try:
load_weights = True if os.path.exists(self.path_load_weights) else False
except TypeError:
load_weights = False
if load_weights:
self._load_model_weights()
else:
# save transferred weights as default, in case no better weights are found
self.model.save_weights(self.path_save_weights.replace('model-weights', 'backup-model-weights'))
self.model.save_weights(self.path_save_weights)
self._compile_model()
self._compute_baseline_performance()
self._define_callbacks()
def train_model(self):
# garbage collector
_ = gc.collect()
# use more verbose when debugging
verbose = 1 if self.debug_mode else 2
# train the model
self.model.fit(self.GENERATORS['train'], steps_per_epoch=self.GENERATORS['train'].steps,
validation_data=self.GENERATORS['val'], validation_steps=self.GENERATORS['val'].steps,
shuffle=False, use_multiprocessing=False, workers=self.n_cpus, epochs=self.n_epochs_max,
class_weight=self.class_weights, callbacks=self.callbacks, verbose=verbose)
class PredictionsGenerate(DeepLearning):
"""
Generates the predictions for each model.
Unscales the predictions.
"""
def __init__(self, target=None, organ=None, view=None, transformation=None, architecture=None, n_fc_layers=None,
n_fc_nodes=None, optimizer=None, learning_rate=None, weight_decay=None, dropout_rate=None,
data_augmentation_factor=None, outer_fold=None, debug_mode=False):
# Initialize parameters
DeepLearning.__init__(self, target, organ, view, transformation, architecture, n_fc_layers, n_fc_nodes,
optimizer, learning_rate, weight_decay, dropout_rate, data_augmentation_factor,
debug_mode)
self.outer_fold = outer_fold
self.mode = 'model_testing'
# Define dictionaries attributes for data, generators and predictions
self.DATA_FEATURES_BATCH = {}
self.DATA_FEATURES_LEFTOVERS = {}
self.GENERATORS_BATCH = None
self.GENERATORS_LEFTOVERS = None
self.PREDICTIONS = {}
def _split_batch_leftovers(self):
# split the samples into two groups: what can fit into the batch size, and the leftovers.
for fold in self.folds:
n_leftovers = len(self.DATA_FEATURES[fold].index) % self.n_ids_batch
if n_leftovers > 0:
self.DATA_FEATURES_BATCH[fold] = self.DATA_FEATURES[fold].iloc[:-n_leftovers]
self.DATA_FEATURES_LEFTOVERS[fold] = self.DATA_FEATURES[fold].tail(n_leftovers)
else:
self.DATA_FEATURES_BATCH[fold] = self.DATA_FEATURES[fold] # special case for syntax if no leftovers
if fold in self.DATA_FEATURES_LEFTOVERS.keys():
del self.DATA_FEATURES_LEFTOVERS[fold]
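        # Hedged worked example (made-up sizes): with 103 samples in a fold and n_ids_batch = 32,
        # n_leftovers = 103 % 32 = 7, so the first 96 samples go through the batch generator and the last 7
        # are predicted separately through the leftovers generator.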
def _generate_outerfold_predictions(self):
# prepare unscaling
if self.target in self.targets_regression:
mean_train = self.DATA_FEATURES['train'][self.target + '_raw'].mean()
std_train = self.DATA_FEATURES['train'][self.target + '_raw'].std()
else:
mean_train, std_train = None, None
# Generate predictions
for fold in self.folds:
print('Predicting samples from fold ' + fold + '.')
print(str(len(self.DATA_FEATURES[fold].index)) + ' samples to predict.')
print('Predicting batches: ' + str(len(self.DATA_FEATURES_BATCH[fold].index)) + ' samples.')
pred_batch = self.model.predict(self.GENERATORS_BATCH[fold], steps=self.GENERATORS_BATCH[fold].steps,
verbose=1)
if fold in self.GENERATORS_LEFTOVERS.keys():
print('Predicting leftovers: ' + str(len(self.DATA_FEATURES_LEFTOVERS[fold].index)) + ' samples.')
pred_leftovers = self.model.predict(self.GENERATORS_LEFTOVERS[fold],
steps=self.GENERATORS_LEFTOVERS[fold].steps, verbose=1)
pred_full = np.concatenate((pred_batch, pred_leftovers)).squeeze()
else:
pred_full = pred_batch.squeeze()
print('Predicted a total of ' + str(len(pred_full)) + ' samples.')
# take the average between left and right predictions for paired organs
if self.organ + '_' + self.view in self.left_right_organs_views:
pred_full = np.mean(pred_full.reshape(-1, 2), axis=1)
# unscale predictions
if self.target in self.targets_regression:
pred_full = pred_full * std_train + mean_train
# format the dataframe
self.DATA_FEATURES[fold]['pred'] = pred_full
self.PREDICTIONS[fold] = self.DATA_FEATURES[fold]
self.PREDICTIONS[fold]['id'] = [ID.replace('.jpg', '') for ID in self.PREDICTIONS[fold]['id']]
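        # Hedged example of the two post-processing steps above (illustrative numbers): for paired eyes, raw
        # predictions ordered [right_0, left_0, right_1, left_1] become [(right_0 + left_0) / 2, (right_1 + left_1) / 2];
        # for a regression target such as Age, a scaled prediction of 0.5 with mean_train = 55 and std_train = 8 is
        # unscaled to 0.5 * 8 + 55 = 59.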
def _generate_predictions(self):
self.path_load_weights = self.path_data + 'model-weights_' + self.version + '_' + self.outer_fold + '.h5'
self._load_data_features()
if self.debug_mode:
self._take_subset_to_debug()
self._load_model_weights()
self._split_batch_leftovers()
# generate the generators
self.GENERATORS_BATCH = self._generate_generators(DATA_FEATURES=self.DATA_FEATURES_BATCH)
if self.DATA_FEATURES_LEFTOVERS is not None:
self.GENERATORS_LEFTOVERS = self._generate_generators(DATA_FEATURES=self.DATA_FEATURES_LEFTOVERS)
self._generate_outerfold_predictions()
def _format_predictions(self):
for fold in self.folds:
perf_fun = self.dict_metrics_sklearn[self.dict_main_metrics_names[self.target]]
perf = perf_fun(self.PREDICTIONS[fold][self.target + '_raw'], self.PREDICTIONS[fold]['pred'])
print('The ' + fold + ' performance is: ' + str(perf))
# format the predictions
self.PREDICTIONS[fold].index.name = 'column_names'
self.PREDICTIONS[fold] = self.PREDICTIONS[fold][['id', 'outer_fold', 'pred']]
def generate_predictions(self):
self._generate_architecture()
self._generate_predictions()
self._format_predictions()
def save_predictions(self):
for fold in self.folds:
self.PREDICTIONS[fold].to_csv(self.path_data + 'Predictions_instances_' + self.version + '_' + fold + '_'
+ self.outer_fold + '.csv', index=False)
class PredictionsConcatenate(Basics):
"""
Concatenates the predictions coming from the different cross validation folds.
"""
def __init__(self, target=None, organ=None, view=None, transformation=None, architecture=None, n_fc_layers=None,
n_fc_nodes=None, optimizer=None, learning_rate=None, weight_decay=None, dropout_rate=None,
data_augmentation_factor=None):
# Initialize parameters
Basics.__init__(self)
self.version = target + '_' + organ + '_' + view + '_' + transformation + '_' + architecture + '_' + \
n_fc_layers + '_' + n_fc_nodes + '_' + optimizer + '_' + learning_rate + '_' + weight_decay + \
'_' + dropout_rate + '_' + data_augmentation_factor
# Define dictionaries attributes for data, generators and predictions
self.PREDICTIONS = {}
def concatenate_predictions(self):
for fold in self.folds:
for outer_fold in self.outer_folds:
Predictions_fold = pd.read_csv(self.path_data + 'Predictions_instances_' + self.version + '_' + fold +
'_' + outer_fold + '.csv')
if fold in self.PREDICTIONS.keys():
self.PREDICTIONS[fold] = pd.concat([self.PREDICTIONS[fold], Predictions_fold])
else:
self.PREDICTIONS[fold] = Predictions_fold
def save_predictions(self):
for fold in self.folds:
self.PREDICTIONS[fold].to_csv(self.path_data + 'Predictions_instances_' + self.version + '_' + fold +
'.csv', index=False)
class PredictionsMerge(Basics):
"""
Merges the predictions from all models into a unified dataframe.
"""
def __init__(self, target=None, fold=None):
Basics.__init__(self)
# Define dictionaries attributes for data, generators and predictions
self.target = target
self.fold = fold
self.data_features = None
self.list_models = None
self.Predictions_df_previous = None
self.Predictions_df = None
def _load_data_features(self):
self.data_features = pd.read_csv(self.path_data + 'data-features_instances.csv',
usecols=self.id_vars + self.demographic_vars)
for var in self.id_vars:
self.data_features[var] = self.data_features[var].astype(str)
self.data_features.set_index('id', drop=False, inplace=True)
self.data_features.index.name = 'column_names'
def _preprocess_data_features(self):
# For the training set, each sample is predicted n_CV_outer_folds times, so prepare a larger dataframe
if self.fold == 'train':
df_all_folds = None
for outer_fold in self.outer_folds:
df_fold = self.data_features.copy()
df_all_folds = df_fold if outer_fold == self.outer_folds[0] else df_all_folds.append(df_fold)
self.data_features = df_all_folds
def _load_previous_merged_predictions(self):
if os.path.exists(self.path_data + 'PREDICTIONS_withoutEnsembles_instances_' + self.target + '_' + self.fold +
'.csv'):
self.Predictions_df_previous = pd.read_csv(self.path_data + 'PREDICTIONS_withoutEnsembles_instances_' +
self.target + '_' + self.fold + '.csv')
self.Predictions_df_previous.drop(columns=['eid', 'instance'] + self.demographic_vars, inplace=True)
def _list_models(self):
# generate list of predictions that will be integrated in the Predictions dataframe
self.list_models = glob.glob(self.path_data + 'Predictions_instances_' + self.target + '_*_' + self.fold +
'.csv')
# get rid of ensemble models and models already merged
self.list_models = [model for model in self.list_models if ('*' not in model)]
if self.Predictions_df_previous is not None:
self.list_models = \
[model for model in self.list_models
if ('pred_' + '_'.join(model.split('_')[2:-1]) not in self.Predictions_df_previous.columns)]
self.list_models.sort()
def preprocessing(self):
self._load_data_features()
self._preprocess_data_features()
self._load_previous_merged_predictions()
self._list_models()
def merge_predictions(self):
# merge the predictions
print('There are ' + str(len(self.list_models)) + ' models to merge.')
i = 0
# define subgroups to accelerate merging process
list_subgroups = list(set(['_'.join(model.split('_')[3:7]) for model in self.list_models]))
for subgroup in list_subgroups:
print('Merging models from the subgroup ' + subgroup)
models_subgroup = [model for model in self.list_models if subgroup in model]
Predictions_subgroup = None
# merge the models one by one
for file_name in models_subgroup:
i += 1
version = '_'.join(file_name.split('_')[2:-1])
if self.Predictions_df_previous is not None and \
'pred_' + version in self.Predictions_df_previous.columns:
print('The model ' + version + ' has already been merged before.')
else:
print('Merging the ' + str(i) + 'th model: ' + version)
# load csv and format the predictions
prediction = pd.read_csv(self.path_data + file_name)
print('raw prediction\'s shape: ' + str(prediction.shape))
for var in ['id', 'outer_fold']:
prediction[var] = prediction[var].apply(str)
prediction.rename(columns={'pred': 'pred_' + version}, inplace=True)
# merge data frames
if Predictions_subgroup is None:
Predictions_subgroup = prediction
elif self.fold == 'train':
Predictions_subgroup = Predictions_subgroup.merge(prediction, how='outer',
on=['id', 'outer_fold'])
else:
prediction.drop(['outer_fold'], axis=1, inplace=True)
                        # not supported for pandas versions > 0.23.4 for now
Predictions_subgroup = Predictions_subgroup.merge(prediction, how='outer', on=['id'])
# merge group predictions data frames
if self.fold != 'train':
Predictions_subgroup.drop(['outer_fold'], axis=1, inplace=True)
if Predictions_subgroup is not None:
if self.Predictions_df is None:
self.Predictions_df = Predictions_subgroup
elif self.fold == 'train':
self.Predictions_df = self.Predictions_df.merge(Predictions_subgroup, how='outer',
on=['id', 'outer_fold'])
else:
                    # not supported for pandas versions > 0.23.4 for now
self.Predictions_df = self.Predictions_df.merge(Predictions_subgroup, how='outer', on=['id'])
print('Predictions_df\'s shape: ' + str(self.Predictions_df.shape))
# garbage collector
gc.collect()
# Merge with the previously merged predictions
if (self.Predictions_df_previous is not None) & (self.Predictions_df is not None):
if self.fold == 'train':
self.Predictions_df = self.Predictions_df_previous.merge(self.Predictions_df, how='outer',
on=['id', 'outer_fold'])
else:
self.Predictions_df.drop(columns=['outer_fold'], inplace=True)
                # not supported for pandas versions > 0.23.4 for now
self.Predictions_df = self.Predictions_df_previous.merge(self.Predictions_df, how='outer', on=['id'])
self.Predictions_df_previous = None
elif self.Predictions_df is None:
print('No new models to merge. Exiting.')
print('Done.')
sys.exit(0)
# Reorder the columns alphabetically
pred_versions = [col for col in self.Predictions_df.columns if 'pred_' in col]
pred_versions.sort()
id_cols = ['id', 'outer_fold'] if self.fold == 'train' else ['id']
self.Predictions_df = self.Predictions_df[id_cols + pred_versions]
def postprocessing(self):
# get rid of useless rows in data_features before merging to keep the memory requirements as low as possible
self.data_features = self.data_features[self.data_features['id'].isin(self.Predictions_df['id'].values)]
# merge data_features and predictions
if self.fold == 'train':
print('Starting to merge a massive dataframe')
self.Predictions_df = self.data_features.merge(self.Predictions_df, how='outer', on=['id', 'outer_fold'])
else:
            # not supported for pandas versions > 0.23.4 for now
self.Predictions_df = self.data_features.merge(self.Predictions_df, how='outer', on=['id'])
print('Merging done')
# remove rows for which no prediction is available (should be none)
subset_cols = [col for col in self.Predictions_df.columns if 'pred_' in col]
self.Predictions_df.dropna(subset=subset_cols, how='all', inplace=True)
# Displaying the R2s
versions = [col.replace('pred_', '') for col in self.Predictions_df.columns if 'pred_' in col]
r2s = []
for version in versions:
df = self.Predictions_df[[self.target, 'pred_' + version]].dropna()
r2s.append(r2_score(df[self.target], df['pred_' + version]))
R2S = pd.DataFrame({'version': versions, 'R2': r2s})
R2S.sort_values(by='R2', ascending=False, inplace=True)
print('R2 for each model: ')
print(R2S)
def save_merged_predictions(self):
print('Writing the merged predictions...')
self.Predictions_df.to_csv(self.path_data + 'PREDICTIONS_withoutEnsembles_instances_' + self.target + '_' +
self.fold + '.csv', index=False)
class PredictionsEids(Basics):
"""
Computes the average age prediction across samples from different instances for every participant.
(Scaled back to instance 0)
"""
def __init__(self, target=None, fold=None, debug_mode=None):
Basics.__init__(self)
# Define dictionaries attributes for data, generators and predictions
self.target = target
self.fold = fold
self.debug_mode = debug_mode
self.Predictions = None
self.Predictions_chunk = None
self.pred_versions = None
self.res_versions = None
self.target_0s = None
self.Predictions_eids = None
self.Predictions_eids_previous = None
self.pred_versions_previous = None
def preprocessing(self):
# Load predictions
self.Predictions = pd.read_csv(
self.path_data + 'PREDICTIONS_withoutEnsembles_instances_' + self.target + '_' + self.fold + '.csv')
self.Predictions.drop(columns=['id'], inplace=True)
self.Predictions['eid'] = self.Predictions['eid'].astype(str)
self.Predictions.index.name = 'column_names'
self.pred_versions = [col for col in self.Predictions.columns.values if 'pred_' in col]
# Prepare target values on instance 0 as a reference
target_0s = pd.read_csv(self.path_data + 'data-features_eids.csv', usecols=['eid', self.target])
target_0s['eid'] = target_0s['eid'].astype(str)
target_0s.set_index('eid', inplace=True)
target_0s = target_0s[self.target]
target_0s.name = 'target_0'
target_0s = target_0s[self.Predictions['eid'].unique()]
self.Predictions = self.Predictions.merge(target_0s, on='eid')
# Compute biological ages reported to target_0
for pred in self.pred_versions:
            # Compute the bias of the predictions as a function of age
print('Generating residuals for model ' + pred.replace('pred_', ''))
df_model = self.Predictions[['Age', pred]]
df_model.dropna(inplace=True)
if (len(df_model.index)) > 0:
age = df_model.loc[:, ['Age']]
res = df_model['Age'] - df_model[pred]
regr = LinearRegression()
regr.fit(age, res)
self.Predictions[pred.replace('pred_', 'correction_')] = regr.predict(self.Predictions[['Age']])
                # Take the residual bias into account when "translating" the prediction to instance 0
correction = self.Predictions['target_0'] - self.Predictions[self.target] + \
regr.predict(self.Predictions[['Age']]) - regr.predict(self.Predictions[['target_0']])
self.Predictions[pred] = self.Predictions[pred] + correction
self.Predictions[self.target] = self.Predictions['target_0']
self.Predictions.drop(columns=['target_0'], inplace=True)
self.Predictions.index.name = 'column_names'
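        # Hedged worked example of the translation to instance 0 (made-up values): if a participant was 70 at the
        # predicted instance and 60 at instance 0, and the fitted age-dependent residual bias gives
        # regr.predict([[70]]) = -1 and regr.predict([[60]]) = +1, then correction = 60 - 70 + (-1) - 1 = -12,
        # so a prediction of 72 at the later instance is reported as 72 - 12 = 60 on the instance-0 scale.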
def processing(self):
if self.fold == 'train':
# Prepare template to which each model will be appended
Predictions = self.Predictions[['eid'] + self.demographic_vars]
Predictions = Predictions.groupby('eid', as_index=True).mean()
Predictions.index.name = 'column_names'
Predictions['eid'] = Predictions.index.values
Predictions['instance'] = '*'
Predictions['id'] = Predictions['eid'] + '_*'
self.Predictions_eids = Predictions.copy()
self.Predictions_eids['outer_fold'] = -1
for i in range(self.n_CV_outer_folds):
Predictions_i = Predictions.copy()
Predictions_i['outer_fold'] = i
self.Predictions_eids = self.Predictions_eids.append(Predictions_i)
# Append each model one by one because the folds are different
print(str(len(self.pred_versions)) + ' models to compute.')
for pred_version in self.pred_versions:
if pred_version in self.pred_versions_previous:
print(pred_version.replace('pred_', '') + ' had already been computed.')
else:
print("Computing results for version " + pred_version.replace('pred_', ''))
Predictions_version = self.Predictions[['eid', pred_version, 'outer_fold']]
# Use placeholder for NaN in outer_folds
                    Predictions_version.loc[Predictions_version['outer_fold'].isna(), 'outer_fold'] = -1
Predictions_version_eids = Predictions_version.groupby(['eid', 'outer_fold'], as_index=False).mean()
self.Predictions_eids = self.Predictions_eids.merge(Predictions_version_eids,
on=['eid', 'outer_fold'], how='outer')
                    # of_version: name of the outer_fold column for this model version (assumed naming convention)
                    of_version = pred_version.replace('pred_', 'outer_fold_')
                    self.Predictions_eids[of_version] = self.Predictions_eids['outer_fold']
                    self.Predictions_eids.loc[self.Predictions_eids[of_version] == -1, of_version] = np.nan
del Predictions_version
                    _ = gc.collect()
self.Predictions_eids.drop(columns=['outer_fold'], inplace=True)
else:
self.Predictions_eids = self.Predictions.groupby('eid').mean()
self.Predictions_eids['eid'] = self.Predictions_eids.index.values
self.Predictions_eids['instance'] = '*'
self.Predictions_eids['id'] = self.Predictions_eids['eid'].astype(str) + '_' + \
self.Predictions_eids['instance']
# Re-order the columns
self.Predictions_eids = self.Predictions_eids[self.id_vars + self.demographic_vars + self.pred_versions]
def postprocessing(self):
# Displaying the R2s
versions = [col.replace('pred_', '') for col in self.Predictions_eids.columns if 'pred_' in col]
r2s = []
for version in versions:
df = self.Predictions_eids[[self.target, 'pred_' + version]].dropna()
r2s.append(r2_score(df[self.target], df['pred_' + version]))
R2S = pd.DataFrame({'version': versions, 'R2': r2s})
R2S.sort_values(by='R2', ascending=False, inplace=True)
print('R2 for each model: ')
print(R2S)
def _generate_single_model_predictions(self):
for pred_version in self.pred_versions:
path_save = \
self.path_data + 'Predictions_eids_' + '_'.join(pred_version.split('_')[1:]) + '_' + self.fold + '.csv'
# Generate only if does not exist already.
if not os.path.exists(path_save):
Predictions_version = self.Predictions_eids[['id', 'outer_fold', pred_version]]
Predictions_version.rename(columns={pred_version: 'pred'}, inplace=True)
Predictions_version.dropna(subset=['pred'], inplace=True)
Predictions_version.to_csv(path_save, index=False)
def save_predictions(self):
self.Predictions_eids.to_csv(self.path_data + 'PREDICTIONS_withoutEnsembles_eids_' + self.target + '_' +
self.fold + '.csv', index=False)
# Generate and save files for every single model
self._generate_single_model_predictions()
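# Note on the class above: instance-level predictions are averaged per participant (groupby('eid')),
# the instance is replaced by the '*' placeholder, and the resulting eid-level predictions are saved
# under the same naming scheme as the instance-level ones ('Predictions_eids_<version>_<fold>.csv').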
class PerformancesGenerate(Metrics):
"""
Computes the performances for each model.
"""
def __init__(self, target=None, organ=None, view=None, transformation=None, architecture=None, n_fc_layers=None,
n_fc_nodes=None, optimizer=None, learning_rate=None, weight_decay=None, dropout_rate=None,
data_augmentation_factor=None, fold=None, pred_type=None, debug_mode=False):
Metrics.__init__(self)
self.target = target
self.organ = organ
self.view = view
self.transformation = transformation
self.architecture = architecture
self.n_fc_layers = n_fc_layers
self.n_fc_nodes = n_fc_nodes
self.optimizer = optimizer
self.learning_rate = learning_rate
self.weight_decay = weight_decay
self.dropout_rate = dropout_rate
self.data_augmentation_factor = data_augmentation_factor
self.fold = fold
self.pred_type = pred_type
if debug_mode:
self.n_bootstrap_iterations = 3
else:
self.n_bootstrap_iterations = 1000
self.version = target + '_' + organ + '_' + view + '_' + transformation + '_' + architecture + '_' + \
n_fc_layers + '_' + n_fc_nodes + '_' + optimizer + '_' + learning_rate + '_' + weight_decay + \
'_' + dropout_rate + '_' + data_augmentation_factor
self.names_metrics = self.dict_metrics_names[self.dict_prediction_types[target]]
self.data_features = None
self.Predictions = None
self.PERFORMANCES = None
def _preprocess_data_features_predictions_for_performances(self):
# load dataset
data_features = pd.read_csv(self.path_data + 'data-features_' + self.pred_type + '.csv',
usecols=['id', 'Sex', 'Age'])
# format data_features to extract y
data_features.rename(columns={self.target: 'y'}, inplace=True)
data_features = data_features[['id', 'y']]
        data_features['id'] = data_features['id'].astype(str)
data_features.set_index('id', drop=False, inplace=True)
data_features.index.name = 'columns_names'
self.data_features = data_features
def _preprocess_predictions_for_performances(self):
Predictions = pd.read_csv(self.path_data + 'Predictions_' + self.pred_type + '_' + self.version + '_' +
self.fold + '.csv')
Predictions['id'] = Predictions['id'].astype(str)
self.Predictions = Predictions.merge(self.data_features, how='inner', on=['id'])
# Initialize performances dataframes and compute sample sizes
def _initiate_empty_performances_df(self):
# Define an empty performances dataframe to store the performances computed
row_names = ['all'] + self.outer_folds
col_names_sample_sizes = ['N']
if self.target in self.targets_binary:
col_names_sample_sizes.extend(['N_0', 'N_1'])
col_names = ['outer_fold'] + col_names_sample_sizes
col_names.extend(self.names_metrics)
performances = np.empty((len(row_names), len(col_names),))
performances.fill(np.nan)
performances = pd.DataFrame(performances)
performances.index = row_names
performances.columns = col_names
performances['outer_fold'] = row_names
# Convert float to int for sample sizes and some metrics.
for col_name in col_names_sample_sizes:
# need recent version of pandas to use type below. Otherwise nan cannot be int
performances[col_name] = performances[col_name].astype('Int64')
# compute sample sizes for the data frame
performances.loc['all', 'N'] = len(self.Predictions.index)
if self.target in self.targets_binary:
performances.loc['all', 'N_0'] = len(self.Predictions.loc[self.Predictions['y'] == 0].index)
performances.loc['all', 'N_1'] = len(self.Predictions.loc[self.Predictions['y'] == 1].index)
for outer_fold in self.outer_folds:
performances.loc[outer_fold, 'N'] = len(
self.Predictions.loc[self.Predictions['outer_fold'] == int(outer_fold)].index)
if self.target in self.targets_binary:
performances.loc[outer_fold, 'N_0'] = len(
self.Predictions.loc[
(self.Predictions['outer_fold'] == int(outer_fold)) & (self.Predictions['y'] == 0)].index)
performances.loc[outer_fold, 'N_1'] = len(
self.Predictions.loc[
(self.Predictions['outer_fold'] == int(outer_fold)) & (self.Predictions['y'] == 1)].index)
# initialize the dataframes
self.PERFORMANCES = {}
for mode in self.modes:
self.PERFORMANCES[mode] = performances.copy()
# Convert float to int for sample sizes and some metrics.
for col_name in self.PERFORMANCES[''].columns.values:
if any(metric in col_name for metric in self.metrics_displayed_in_int):
# need recent version of pandas to use type below. Otherwise nan cannot be int
self.PERFORMANCES[''][col_name] = self.PERFORMANCES[''][col_name].astype('Int64')
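    # The modes ('', '_sd', '_str') hold, respectively, the point estimate of each metric, its
    # bootstrap standard deviation, and the formatted 'value+-sd' string used for display.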
def preprocessing(self):
self._preprocess_data_features_predictions_for_performances()
self._preprocess_predictions_for_performances()
self._initiate_empty_performances_df()
# Fill the columns for this model, outer_fold by outer_fold
def compute_performances(self):
# fill it outer_fold by outer_fold
for outer_fold in ['all'] + self.outer_folds:
print('Calculating the performances for the outer fold ' + outer_fold)
# Generate a subdataframe from the predictions table for each outerfold
if outer_fold == 'all':
predictions_fold = self.Predictions.copy()
else:
predictions_fold = self.Predictions.loc[self.Predictions['outer_fold'] == int(outer_fold), :]
# if no samples are available for this fold, fill columns with nans
if len(predictions_fold.index) == 0:
print('NO SAMPLES AVAILABLE FOR MODEL ' + self.version + ' IN OUTER_FOLD ' + outer_fold)
else:
# For binary classification, generate class prediction
if self.target in self.targets_binary:
predictions_fold_class = predictions_fold.copy()
predictions_fold_class['pred'] = predictions_fold_class['pred'].round()
else:
predictions_fold_class = None
# Fill the Performances dataframe metric by metric
for name_metric in self.names_metrics:
# print('Calculating the performance using the metric ' + name_metric)
if name_metric in self.metrics_needing_classpred:
predictions_metric = predictions_fold_class
else:
predictions_metric = predictions_fold
metric_function = self.dict_metrics_sklearn[name_metric]
self.PERFORMANCES[''].loc[outer_fold, name_metric] = metric_function(predictions_metric['y'],
predictions_metric['pred'])
self.PERFORMANCES['_sd'].loc[outer_fold, name_metric] = \
self._bootstrap(predictions_metric, metric_function)[1]
self.PERFORMANCES['_str'].loc[outer_fold, name_metric] = "{:.3f}".format(
self.PERFORMANCES[''].loc[outer_fold, name_metric]) + '+-' + "{:.3f}".format(
self.PERFORMANCES['_sd'].loc[outer_fold, name_metric])
# calculate the fold sd (variance between the metrics values obtained on the different folds)
folds_sd = self.PERFORMANCES[''].iloc[1:, :].std(axis=0)
for name_metric in self.names_metrics:
self.PERFORMANCES['_str'].loc['all', name_metric] = "{:.3f}".format(
self.PERFORMANCES[''].loc['all', name_metric]) + '+-' + "{:.3f}".format(
folds_sd[name_metric]) + '+-' + "{:.3f}".format(self.PERFORMANCES['_sd'].loc['all', name_metric])
# print the performances
print('Performances for model ' + self.version + ': ')
print(self.PERFORMANCES['_str'])
def save_performances(self):
for mode in self.modes:
path_save = self.path_data + 'Performances_' + self.pred_type + '_' + self.version + '_' + self.fold + \
mode + '.csv'
self.PERFORMANCES[mode].to_csv(path_save, index=False)
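# Usage sketch for PerformancesGenerate (illustrative only; the argument values below are
# hypothetical, and the hyperparameters are passed as strings because they are concatenated
# into self.version):
#
#     perf = PerformancesGenerate(target='Age', organ='Heart', view='MRI', transformation='raw',
#                                 architecture='InceptionResNetV2', n_fc_layers='1', n_fc_nodes='1024',
#                                 optimizer='Adam', learning_rate='0.0001', weight_decay='0.1',
#                                 dropout_rate='0.5', data_augmentation_factor='1.0',
#                                 fold='test', pred_type='instances', debug_mode=True)
#     perf.preprocessing()
#     perf.compute_performances()
#     perf.save_performances()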
class PerformancesMerge(Metrics):
"""
Merges the performances of the different models into a unified dataframe.
"""
def __init__(self, target=None, fold=None, pred_type=None, ensemble_models=None):
# Parameters
Metrics.__init__(self)
self.target = target
self.fold = fold
self.pred_type = pred_type
self.ensemble_models = self.convert_string_to_boolean(ensemble_models)
self.names_metrics = self.dict_metrics_names[self.dict_prediction_types[target]]
self.main_metric_name = self.dict_main_metrics_names[target]
# list the models that need to be merged
self.list_models = glob.glob(self.path_data + 'Performances_' + pred_type + '_' + target + '_*_' + fold +
'_str.csv')
# get rid of ensemble models
if self.ensemble_models:
self.list_models = [model for model in self.list_models if '*' in model]
else:
self.list_models = [model for model in self.list_models if '*' not in model]
self.Performances = None
self.Performances_alphabetical = None
self.Performances_ranked = None
def _initiate_empty_performances_summary_df(self):
# Define the columns of the Performances dataframe
# columns for sample sizes
names_sample_sizes = ['N']
if self.target in self.targets_binary:
names_sample_sizes.extend(['N_0', 'N_1'])
# columns for metrics
names_metrics = self.dict_metrics_names[self.dict_prediction_types[self.target]]
# for normal folds, keep track of metric and bootstrapped metric's sd
names_metrics_with_sd = []
for name_metric in names_metrics:
names_metrics_with_sd.extend([name_metric, name_metric + '_sd', name_metric + '_str'])
# for the 'all' fold, also keep track of the 'folds_sd' (metric's sd calculated using the folds' results)
names_metrics_with_folds_sd_and_sd = []
for name_metric in names_metrics:
names_metrics_with_folds_sd_and_sd.extend(
[name_metric, name_metric + '_folds_sd', name_metric + '_sd', name_metric + '_str'])
# merge all the columns together. First description of the model, then sample sizes and metrics for each fold
names_col_Performances = ['version'] + self.names_model_parameters
# special outer fold 'all'
names_col_Performances.extend(
['_'.join([name, 'all']) for name in names_sample_sizes + names_metrics_with_folds_sd_and_sd])
# other outer_folds
for outer_fold in self.outer_folds:
names_col_Performances.extend(
['_'.join([name, outer_fold]) for name in names_sample_sizes + names_metrics_with_sd])
# Generate the empty Performance table from the rows and columns.
Performances = np.empty((len(self.list_models), len(names_col_Performances),))
Performances.fill(np.nan)
Performances = pd.DataFrame(Performances)
Performances.columns = names_col_Performances
# Format the types of the columns
for colname in Performances.columns.values:
if (colname in self.names_model_parameters) | ('_str' in colname):
col_type = str
else:
col_type = float
Performances[colname] = Performances[colname].astype(col_type)
self.Performances = Performances
def merge_performances(self):
# define parameters
names_metrics = self.dict_metrics_names[self.dict_prediction_types[self.target]]
# initiate dataframe
self._initiate_empty_performances_summary_df()
# Fill the Performance table row by row
for i, model in enumerate(self.list_models):
# load the performances subdataframe
PERFORMANCES = {}
for mode in self.modes:
PERFORMANCES[mode] = pd.read_csv(model.replace('_str', mode))
PERFORMANCES[mode].set_index('outer_fold', drop=False, inplace=True)
# Fill the columns corresponding to the model's parameters
version = '_'.join(model.split('_')[2:-2])
parameters = self._version_to_parameters(version)
# fill the columns for model parameters
self.Performances['version'][i] = version
for parameter_name in self.names_model_parameters:
self.Performances[parameter_name][i] = parameters[parameter_name]
# Fill the columns for this model, outer_fold by outer_fold
for outer_fold in ['all'] + self.outer_folds:
                # Fill the sample size columns
self.Performances['N_' + outer_fold][i] = PERFORMANCES[''].loc[outer_fold, 'N']
                # For binary classification, also fill the sample sizes for each class
if self.target in self.targets_binary:
self.Performances['N_0_' + outer_fold][i] = PERFORMANCES[''].loc[outer_fold, 'N_0']
self.Performances['N_1_' + outer_fold][i] = PERFORMANCES[''].loc[outer_fold, 'N_1']
# Fill the Performances dataframe metric by metric
for name_metric in names_metrics:
for mode in self.modes:
self.Performances[name_metric + mode + '_' + outer_fold][i] = PERFORMANCES[mode].loc[
outer_fold, name_metric]
# calculate the fold sd (variance between the metrics values obtained on the different folds)
folds_sd = PERFORMANCES[''].iloc[1:, :].std(axis=0)
for name_metric in names_metrics:
                self.Performances[name_metric + '_folds_sd_all'][i] = folds_sd[name_metric]
# Convert float to int for sample sizes and some metrics.
for name_col in self.Performances.columns.values:
cond1 = name_col.startswith('N_')
cond2 = any(metric in name_col for metric in self.metrics_displayed_in_int)
cond3 = '_sd' not in name_col
cond4 = '_str' not in name_col
            if (cond1 | cond2) & cond3 & cond4:
self.Performances[name_col] = self.Performances[name_col].astype('Int64')
# need recent version of pandas to use this type. Otherwise nan cannot be int
# For ensemble models, merge the new performances with the previously computed performances
if self.ensemble_models:
Performances_withoutEnsembles = pd.read_csv(self.path_data + 'PERFORMANCES_tuned_alphabetical_' +
self.pred_type + '_' + self.target + '_' + self.fold + '.csv')
self.Performances = Performances_withoutEnsembles.append(self.Performances)
# reorder the columns (weird: automatic alphabetical re-ordering happened when append was called for 'val')
self.Performances = self.Performances[Performances_withoutEnsembles.columns]
# Ranking, printing and saving
self.Performances_alphabetical = self.Performances.sort_values(by='version')
cols_to_print = ['version', self.main_metric_name + '_str_all']
print('Performances of the models ranked by models\'names:')
print(self.Performances_alphabetical[cols_to_print])
sort_by = self.dict_main_metrics_names[self.target] + '_all'
sort_ascending = self.main_metrics_modes[self.dict_main_metrics_names[self.target]] == 'min'
self.Performances_ranked = self.Performances.sort_values(by=sort_by, ascending=sort_ascending)
print('Performances of the models ranked by the performance on the main metric on all the samples:')
print(self.Performances_ranked[cols_to_print])
def save_performances(self):
name_extension = 'withEnsembles' if self.ensemble_models else 'withoutEnsembles'
path = self.path_data + 'PERFORMANCES_' + name_extension + '_alphabetical_' + self.pred_type + '_' + \
self.target + '_' + self.fold + '.csv'
self.Performances_alphabetical.to_csv(path, index=False)
self.Performances_ranked.to_csv(path.replace('_alphabetical_', '_ranked_'), index=False)
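# Usage sketch for PerformancesMerge (illustrative; note that ensemble_models is passed as a string
# and converted with convert_string_to_boolean):
#
#     merger = PerformancesMerge(target='Age', fold='test', pred_type='instances', ensemble_models='False')
#     merger.merge_performances()
#     merger.save_performances()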
class PerformancesTuning(Metrics):
"""
For each model, selects the best hyperparameter combination.
"""
def __init__(self, target=None, pred_type=None):
Metrics.__init__(self)
self.target = target
self.pred_type = pred_type
self.PERFORMANCES = {}
self.PREDICTIONS = {}
self.Performances = None
self.models = None
self.folds = ['val', 'test']
def load_data(self):
for fold in self.folds:
path = self.path_data + 'PERFORMANCES_withoutEnsembles_ranked_' + self.pred_type + '_' + self.target + \
'_' + fold + '.csv'
self.PERFORMANCES[fold] = pd.read_csv(path).set_index('version', drop=False)
self.PERFORMANCES[fold]['organ'] = self.PERFORMANCES[fold]['organ'].astype(str)
self.PERFORMANCES[fold].index.name = 'columns_names'
self.PREDICTIONS[fold] = pd.read_csv(path.replace('PERFORMANCES', 'PREDICTIONS').replace('_ranked', ''))
def preprocess_data(self):
# Get list of distinct models without taking into account hyperparameters tuning
self.Performances = self.PERFORMANCES['val']
self.Performances['model'] = self.Performances['organ'] + '_' + self.Performances['view'] + '_' + \
self.Performances['transformation'] + '_' + self.Performances['architecture']
self.models = self.Performances['model'].unique()
def select_models(self):
main_metric_name = self.dict_main_metrics_names[self.target]
main_metric_mode = self.main_metrics_modes[main_metric_name]
Perf_col_name = main_metric_name + '_all'
for model in self.models:
Performances_model = self.Performances[self.Performances['model'] == model]
Performances_model.sort_values([Perf_col_name, 'n_fc_layers', 'n_fc_nodes', 'learning_rate', 'dropout_rate',
'weight_decay', 'data_augmentation_factor'],
ascending=[main_metric_mode == 'min', True, True, False, False, False,
False], inplace=True)
best_version = Performances_model['version'][
Performances_model[Perf_col_name] == Performances_model[Perf_col_name].max()].values[0]
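            # Note: this selection assumes the main metric is maximized (e.g. R2 for age prediction);
            # for a metric that should be minimized, the .max() above would pick the wrong version.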
versions_to_drop = [version for version in Performances_model['version'].values if
not version == best_version]
# define columns from predictions to drop
cols_to_drop = ['pred_' + version for version in versions_to_drop] + ['outer_fold_' + version for version in
versions_to_drop]
for fold in self.folds:
self.PERFORMANCES[fold].drop(versions_to_drop, inplace=True)
self.PREDICTIONS[fold].drop(cols_to_drop, axis=1, inplace=True)
# drop 'model' column
self.Performances.drop(['model'], axis=1, inplace=True)
# Display results
for fold in self.folds:
print('The tuned ' + fold + ' performances are:')
print(self.PERFORMANCES[fold])
def save_data(self):
# Save the files
for fold in self.folds:
path_pred = self.path_data + 'PREDICTIONS_tuned_' + self.pred_type + '_' + self.target + '_' + fold + \
'.csv'
path_perf = self.path_data + 'PERFORMANCES_tuned_ranked_' + self.pred_type + '_' + self.target + '_' + \
fold + '.csv'
self.PREDICTIONS[fold].to_csv(path_pred, index=False)
self.PERFORMANCES[fold].to_csv(path_perf, index=False)
Performances_alphabetical = self.PERFORMANCES[fold].sort_values(by='version')
Performances_alphabetical.to_csv(path_perf.replace('ranked', 'alphabetical'), index=False)
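# Usage sketch for PerformancesTuning (illustrative):
#
#     tuner = PerformancesTuning(target='Age', pred_type='instances')
#     tuner.load_data()
#     tuner.preprocess_data()
#     tuner.select_models()
#     tuner.save_data()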
# This class was coded by <NAME>.
class InnerCV:
"""
Helper class to perform an inner cross validation to tune the hyperparameters of models trained on scalar predictors
"""
def __init__(self, models, inner_splits, n_iter):
self.inner_splits = inner_splits
self.n_iter = n_iter
if isinstance(models, str):
models = [models]
self.models = models
@staticmethod
def get_model(model_name, params):
if model_name == 'ElasticNet':
return ElasticNet(max_iter=2000, **params)
elif model_name == 'RandomForest':
return RandomForestRegressor(**params)
elif model_name == 'GradientBoosting':
return GradientBoostingRegressor(**params)
elif model_name == 'Xgboost':
return XGBRegressor(**params)
elif model_name == 'LightGbm':
return LGBMRegressor(**params)
elif model_name == 'NeuralNetwork':
return MLPRegressor(solver='adam',
activation='relu',
hidden_layer_sizes=(128, 64, 32),
batch_size=1000,
early_stopping=True, **params)
@staticmethod
def get_hyper_distribution(model_name):
if model_name == 'ElasticNet':
return {
'alpha': hp.loguniform('alpha', low=np.log(0.01), high=np.log(10)),
'l1_ratio': hp.uniform('l1_ratio', low=0.01, high=0.99)
}
elif model_name == 'RandomForest':
return {
'n_estimators': hp.randint('n_estimators', upper=300) + 150,
'max_features': hp.choice('max_features', ['auto', 0.9, 0.8, 0.7, 0.6, 0.5, 0.4]),
'max_depth': hp.choice('max_depth', [None, 10, 8, 6])
}
elif model_name == 'GradientBoosting':
return {
'n_estimators': hp.randint('n_estimators', upper=300) + 150,
'max_features': hp.choice('max_features', ['auto', 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3]),
'learning_rate': hp.uniform('learning_rate', low=0.01, high=0.3),
'max_depth': hp.randint('max_depth', 10) + 5
}
elif model_name == 'Xgboost':
return {
'colsample_bytree': hp.uniform('colsample_bytree', low=0.2, high=0.7),
'gamma': hp.uniform('gamma', low=0.1, high=0.5),
'learning_rate': hp.uniform('learning_rate', low=0.02, high=0.2),
'max_depth': hp.randint('max_depth', 10) + 5,
'n_estimators': hp.randint('n_estimators', 300) + 150,
'subsample': hp.uniform('subsample', 0.2, 0.8)
}
elif model_name == 'LightGbm':
return {
'num_leaves': hp.randint('num_leaves', 40) + 5,
'min_child_samples': hp.randint('min_child_samples', 400) + 100,
'min_child_weight': hp.choice('min_child_weight', [1e-5, 1e-3, 1e-2, 1e-1, 1, 1e1, 1e2, 1e3, 1e4]),
'subsample': hp.uniform('subsample', low=0.2, high=0.8),
'colsample_bytree': hp.uniform('colsample_bytree', low=0.4, high=0.6),
'reg_alpha': hp.choice('reg_alpha', [0, 1e-1, 1, 2, 5, 7, 10, 50, 100]),
'reg_lambda': hp.choice('reg_lambda', [0, 1e-1, 1, 5, 10, 20, 50, 100]),
'n_estimators': hp.randint('n_estimators', 300) + 150
}
elif model_name == 'NeuralNetwork':
return {
'learning_rate_init': hp.loguniform('learning_rate_init', low=np.log(5e-5), high=np.log(2e-2)),
'alpha': hp.uniform('alpha', low=1e-6, high=1e3)
}
def create_folds(self, X, y):
"""
X columns : eid + features except target
y columns : eid + target
"""
X_eid = X.drop_duplicates('eid')
y_eid = y.drop_duplicates('eid')
eids = X_eid.eid
# Kfold on the eid, then regroup all ids
        inner_cv = KFold(n_splits=self.inner_splits, shuffle=False)
list_test_folds = [elem[1] for elem in inner_cv.split(X_eid, y_eid)]
list_test_folds_eid = [eids[elem].values for elem in list_test_folds]
list_test_folds_id = [X.index[X.eid.isin(list_test_folds_eid[elem])].values for elem in
range(len(list_test_folds_eid))]
return list_test_folds_id
def optimize_hyperparameters(self, X, y, scoring):
"""
input X : dataframe with features + eid
input y : dataframe with target + eid
"""
if 'instance' in X.columns:
X = X.drop(columns=['instance'])
if 'instance' in y.columns:
y = y.drop(columns=['instance'])
list_test_folds_id = self.create_folds(X, y)
X = X.drop(columns=['eid'])
y = y.drop(columns=['eid'])
# Create custom Splits
list_test_folds_id_index = [np.array([X.index.get_loc(elem) for elem in list_test_folds_id[fold_num]])
for fold_num in range(len(list_test_folds_id))]
test_folds = np.zeros(len(X), dtype='int')
for fold_count in range(len(list_test_folds_id)):
test_folds[list_test_folds_id_index[fold_count]] = fold_count
inner_cv = PredefinedSplit(test_fold=test_folds)
list_best_params = {}
list_best_score = {}
objective, model_name = None, None
for model_name in self.models:
def objective(hyperparameters):
estimator_ = self.get_model(model_name, hyperparameters)
pipeline = Pipeline([('scaler', StandardScaler()), ('estimator', estimator_)])
scores = cross_validate(pipeline, X.values, y, scoring=scoring, cv=inner_cv, n_jobs=self.inner_splits)
return {'status': STATUS_OK, 'loss': -scores['test_score'].mean(),
'attachments': {'split_test_scores_and_params': (scores['test_score'], hyperparameters)}}
space = self.get_hyper_distribution(model_name)
trials = Trials()
best = fmin(objective, space, algo=tpe.suggest, max_evals=self.n_iter, trials=trials)
best_params = space_eval(space, best)
list_best_params[model_name] = best_params
list_best_score[model_name] = - min(trials.losses())
# Recover best between all models :
best_model = max(list_best_score.keys(), key=(lambda k: list_best_score[k]))
best_model_hyp = list_best_params[best_model]
# Recreate best estim :
estim = self.get_model(best_model, best_model_hyp)
pipeline_best = Pipeline([('scaler', StandardScaler()), ('estimator', estim)])
pipeline_best.fit(X.values, y)
return pipeline_best
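# Usage sketch for InnerCV (illustrative): X and y are dataframes sharing an 'eid' column, as
# described in the optimize_hyperparameters docstring; the call returns a fitted sklearn Pipeline.
#
#     cv = InnerCV(models=['ElasticNet'], inner_splits=5, n_iter=10)
#     best_pipeline = cv.optimize_hyperparameters(X, y, scoring='r2')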
"""
Useful for EnsemblesPredictions. This function needs to be global to allow pool to pickle it.
"""
def compute_ensemble_folds(ensemble_inputs):
if len(ensemble_inputs[1]) < 100:
print('Small sample size:' + str(len(ensemble_inputs[1])))
n_inner_splits = 5
else:
n_inner_splits = 10
# Can use different models: models=['ElasticNet', 'LightGBM', 'NeuralNetwork']
cv = InnerCV(models=['ElasticNet'], inner_splits=n_inner_splits, n_iter=30)
model = cv.optimize_hyperparameters(ensemble_inputs[0], ensemble_inputs[1], scoring='r2')
return model
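# compute_ensemble_folds is mapped over the outer folds with a multiprocessing Pool inside
# EnsemblesPredictions._build_single_ensemble, i.e. (sketch of the call made there):
#
#     pool = Pool(self.N_ensemble_CV_split)
#     MODELS = pool.map(compute_ensemble_folds, list(ENSEMBLE_INPUTS.values()))
#     pool.close()
#     pool.join()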
class EnsemblesPredictions(Metrics):
"""
Hierarchically builds ensemble models from the already existing predictions.
"""
def __init__(self, target=None, pred_type=None, regenerate_models=False):
# Parameters
Metrics.__init__(self)
self.target = target
self.pred_type = pred_type
self.folds = ['val', 'test']
self.regenerate_models = regenerate_models
self.ensembles_performance_cutoff_percent = 0.5
self.parameters = {'target': target, 'organ': '*', 'view': '*', 'transformation': '*', 'architecture': '*',
'n_fc_layers': '*', 'n_fc_nodes': '*', 'optimizer': '*', 'learning_rate': '*',
'weight_decay': '*', 'dropout_rate': '*', 'data_augmentation_factor': '*'}
self.version = self._parameters_to_version(self.parameters)
self.main_metric_name = self.dict_main_metrics_names[target]
self.init_perf = -np.Inf if self.main_metrics_modes[self.main_metric_name] == 'max' else np.Inf
path_perf = self.path_data + 'PERFORMANCES_tuned_ranked_' + pred_type + '_' + target + '_val.csv'
self.Performances = pd.read_csv(path_perf).set_index('version', drop=False)
self.Performances['organ'] = self.Performances['organ'].astype(str)
self.list_ensemble_levels = ['transformation', 'view', 'organ']
self.PREDICTIONS = {}
self.weights_by_category = None
self.weights_by_ensembles = None
self.N_ensemble_CV_split = 10
self.instancesS = {'instances': ['01', '1.5x', '23'], 'eids': ['*']}
self.instances_names_to_numbers = {'01': ['0', '1'], '1.5x': ['1.5', '1.51', '1.52', '1.53', '1.54'],
'23': ['2', '3'], '*': ['*']}
self.INSTANCES_DATASETS = {
'01': ['Eyes', 'Hearing', 'Lungs', 'Arterial', 'Musculoskeletal', 'Biochemistry', 'ImmuneSystem'],
'1.5x': ['PhysicalActivity'],
'23': ['Brain', 'Arterial', 'Heart', 'Abdomen', 'Musculoskeletal'],
'*': ['Brain', 'Eyes', 'Hearing', 'Lungs', 'Arterial', 'Heart', 'Abdomen', 'Musculoskeletal',
'PhysicalActivity', 'ImmuneSystem']
}
    # Get rid of columns and rows for the versions for which all samples are NaNs
@staticmethod
def _drop_na_pred_versions(PREDS, Performances):
# Select the versions for which only NAs are available
pred_versions = [col for col in PREDS['val'].columns.values if 'pred_' in col]
to_drop = []
for pv in pred_versions:
for fold in PREDS.keys():
if PREDS[fold][pv].notna().sum() == 0:
to_drop.append(pv)
break
# Drop the corresponding columns from preds, and rows from performances
index_to_drop = [p.replace('pred_', '') for p in to_drop if '*' not in p]
for fold in PREDS.keys():
PREDS[fold].drop(to_drop, axis=1, inplace=True)
return Performances.drop(index_to_drop)
def load_data(self):
for fold in self.folds:
self.PREDICTIONS[fold] = pd.read_csv(
self.path_data + 'PREDICTIONS_tuned_' + self.pred_type + '_' + self.target + '_' + fold + '.csv')
def _build_single_ensemble(self, PREDICTIONS, version):
# Drop columns that are exclusively NaNs
all_nan = PREDICTIONS['val'].isna().all() | PREDICTIONS['test'].isna().all()
non_nan_cols = all_nan[~all_nan.values].index
for fold in self.folds:
PREDICTIONS[fold] = PREDICTIONS[fold][non_nan_cols]
Predictions = PREDICTIONS['val']
# Select the columns for the model
ensemble_preds_cols = [col for col in Predictions.columns.values if
bool(re.compile('pred_' + version).match(col))]
# If only one model in the ensemble, just copy the column. Otherwise build the ensemble model
if len(ensemble_preds_cols) == 1:
for fold in self.folds:
PREDICTIONS[fold]['pred_' + version] = PREDICTIONS[fold][ensemble_preds_cols[0]]
else:
# Initiate the dictionaries
PREDICTIONS_OUTERFOLDS = {}
ENSEMBLE_INPUTS = {}
for outer_fold in self.outer_folds:
# take the subset of the rows that correspond to the outer_fold
PREDICTIONS_OUTERFOLDS[outer_fold] = {}
XS_outer_fold = {}
YS_outer_fold = {}
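                # Rotation of the folds for this ensemble sub-model: the current outer fold provides
                # the validation rows, the next fold (modulo n_CV_outer_folds) the test rows, and the
                # remaining folds the training rows.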
dict_fold_to_outer_folds = {
'val': [float(outer_fold)],
'test': [(float(outer_fold) + 1) % self.n_CV_outer_folds],
'train': [float(of) for of in self.outer_folds
if float(of) not in [float(outer_fold), (float(outer_fold) + 1) % self.n_CV_outer_folds]]
}
for fold in self.folds:
PREDICTIONS_OUTERFOLDS[outer_fold][fold] = \
PREDICTIONS[fold][PREDICTIONS[fold]['outer_fold'].isin(dict_fold_to_outer_folds[fold])]
PREDICTIONS_OUTERFOLDS[outer_fold][fold] = PREDICTIONS_OUTERFOLDS[outer_fold][fold][
['id', 'eid', 'instance', self.target] + ensemble_preds_cols].dropna()
X = PREDICTIONS_OUTERFOLDS[outer_fold][fold][['id', 'eid', 'instance'] + ensemble_preds_cols]
X.set_index('id', inplace=True)
XS_outer_fold[fold] = X
y = PREDICTIONS_OUTERFOLDS[outer_fold][fold][['id', 'eid', self.target]]
y.set_index('id', inplace=True)
YS_outer_fold[fold] = y
ENSEMBLE_INPUTS[outer_fold] = [XS_outer_fold['val'], YS_outer_fold['val']]
# Build ensemble model using ElasticNet and/or LightGBM, Neural Network.
PREDICTIONS_ENSEMBLE = {}
pool = Pool(self.N_ensemble_CV_split)
MODELS = pool.map(compute_ensemble_folds, list(ENSEMBLE_INPUTS.values()))
pool.close()
pool.join()
# Concatenate all outer folds
for outer_fold in self.outer_folds:
for fold in self.folds:
X = PREDICTIONS_OUTERFOLDS[outer_fold][fold][ensemble_preds_cols]
PREDICTIONS_OUTERFOLDS[outer_fold][fold]['pred_' + version] = MODELS[int(outer_fold)].predict(X)
PREDICTIONS_OUTERFOLDS[outer_fold][fold]['outer_fold'] = float(outer_fold)
df_outer_fold = PREDICTIONS_OUTERFOLDS[outer_fold][fold][['id', 'outer_fold',
'pred_' + version]]
# Initiate, or append if some previous outerfolds have already been concatenated
if fold not in PREDICTIONS_ENSEMBLE.keys():
PREDICTIONS_ENSEMBLE[fold] = df_outer_fold
else:
PREDICTIONS_ENSEMBLE[fold] = PREDICTIONS_ENSEMBLE[fold].append(df_outer_fold)
# Add the ensemble predictions to the dataframe
for fold in self.folds:
if fold == 'train':
PREDICTIONS[fold] = PREDICTIONS[fold].merge(PREDICTIONS_ENSEMBLE[fold], how='outer',
on=['id', 'outer_fold'])
else:
PREDICTIONS_ENSEMBLE[fold].drop('outer_fold', axis=1, inplace=True)
PREDICTIONS[fold] = PREDICTIONS[fold].merge(PREDICTIONS_ENSEMBLE[fold], how='outer', on=['id'])
def _build_single_ensemble_wrapper(self, version, ensemble_level):
print('Building the ensemble model ' + version)
pred_version = 'pred_' + version
# Evaluate if the ensemble model should be built
# 1 - separately on instance 0-1, 1.5 and 2-3 (for ensemble at the top level, since overlap between models is 0)
# 2 - piece by piece on each outer_fold
# 1-Compute instances 0-1, 1.5 and 2-3 separately
if ensemble_level == 'organ':
for fold in self.folds:
self.PREDICTIONS[fold][pred_version] = np.nan
                # Add an ensemble for each instance group (01, 1.5x, and 23)
if self.pred_type == 'instances':
for instances_names in self.instancesS[self.pred_type]:
pv = 'pred_' + version.split('_')[0] + '_*instances' + instances_names + '_' + \
'_'.join(version.split('_')[2:])
self.PREDICTIONS[fold][pv] = np.nan
for instances_names in self.instancesS[self.pred_type]:
print('Building final ensemble model for samples in the instances: ' + instances_names)
# Take subset of rows and columns
instances = self.instances_names_to_numbers[instances_names]
instances_datasets = self.INSTANCES_DATASETS[instances_names]
versions = \
[col.replace('pred_', '') for col in self.PREDICTIONS['val'].columns if 'pred_' in col]
instances_versions = [version for version in versions
if any(dataset in version for dataset in instances_datasets)]
cols_to_keep = self.id_vars + self.demographic_vars + \
['pred_' + version for version in instances_versions]
PREDICTIONS = {}
for fold in self.folds:
PREDICTIONS[fold] = self.PREDICTIONS[fold][self.PREDICTIONS[fold].instance.isin(instances)]
PREDICTIONS[fold] = PREDICTIONS[fold][cols_to_keep]
self._build_single_ensemble(PREDICTIONS, version)
# Print a quick performance estimation for each instance(s)
df_model = PREDICTIONS['test'][[self.target, 'pred_' + version]].dropna()
print(instances_names)
print(self.main_metric_name + ' for instance(s) ' + instances_names + ': ' +
str(r2_score(df_model[self.target], df_model['pred_' + version])))
print('The sample size is ' + str(len(df_model.index)) + '.')
                # Add the predictions to the dataframe, chunk by chunk, one instance group at a time
for fold in self.folds:
self.PREDICTIONS[fold][pred_version][self.PREDICTIONS[fold].instance.isin(instances)] = \
PREDICTIONS[fold][pred_version].values
# Add an ensemble for the instance(s) only
if self.pred_type == 'instances':
pv = 'pred_' + version.split('_')[0] + '_*instances' + instances_names + '_' + \
'_'.join(version.split('_')[2:])
self.PREDICTIONS[fold][pv][self.PREDICTIONS[fold].instance.isin(instances)] = \
PREDICTIONS[fold][pred_version].values
# Add three extra ensemble models for eids, to allow larger sample sizes for GWAS purposes
if self.pred_type == 'eids':
for instances_names in ['01', '1.5x', '23']:
print('Building final sub-ensemble model for samples in the instances: ' + instances_names)
# Keep only relevant columns
instances_datasets = self.INSTANCES_DATASETS[instances_names]
versions = \
[col.replace('pred_', '') for col in self.PREDICTIONS['val'].columns if 'pred_' in col]
instances_versions = [version for version in versions
if any(dataset in version for dataset in instances_datasets)]
cols_to_keep = self.id_vars + self.demographic_vars + \
['pred_' + version for version in instances_versions]
PREDICTIONS = {}
for fold in self.folds:
PREDICTIONS[fold] = self.PREDICTIONS[fold].copy()
PREDICTIONS[fold] = PREDICTIONS[fold][cols_to_keep]
self._build_single_ensemble(PREDICTIONS, version)
# Print a quick performance estimation for each instance(s)
df_model = PREDICTIONS['test'][[self.target, 'pred_' + version]].dropna()
print(instances_names)
print(self.main_metric_name + ' for instance(s) ' + instances_names + ': ' +
str(r2_score(df_model[self.target], df_model['pred_' + version])))
print('The sample size is ' + str(len(df_model.index)) + '.')
# Add the predictions to the dataframe
pv = 'pred_' + version.split('_')[0] + '_*instances' + instances_names + '_' + \
'_'.join(version.split('_')[2:])
for fold in self.folds:
self.PREDICTIONS[fold][pv] = PREDICTIONS[fold][pred_version].values
# 2-Compute fold by fold
else:
self._build_single_ensemble(self.PREDICTIONS, version)
# build and save a dataset for this specific ensemble model
for fold in self.folds:
df_single_ensemble = self.PREDICTIONS[fold][['id', 'outer_fold', pred_version]]
df_single_ensemble.rename(columns={pred_version: 'pred'}, inplace=True)
df_single_ensemble.dropna(inplace=True, subset=['pred'])
df_single_ensemble.to_csv(self.path_data + 'Predictions_' + self.pred_type + '_' + version + '_' + fold +
'.csv', index=False)
# Add extra ensembles at organ level
if ensemble_level == 'organ':
for instances_names in ['01', '1.5x', '23']:
pv = 'pred_' + version.split('_')[0] + '_*instances' + instances_names + '_' + \
'_'.join(version.split('_')[2:])
version_instances = version.split('_')[0] + '_*instances' + instances_names + '_' + \
'_'.join(version.split('_')[2:])
df_single_ensemble = self.PREDICTIONS[fold][['id', 'outer_fold', pv]]
df_single_ensemble.rename(columns={pv: 'pred'}, inplace=True)
df_single_ensemble.dropna(inplace=True, subset=['pred'])
df_single_ensemble.to_csv(self.path_data + 'Predictions_' + self.pred_type + '_' +
version_instances + '_' + fold + '.csv', index=False)
def _recursive_ensemble_builder(self, Performances_grandparent, parameters_parent, version_parent,
list_ensemble_levels_parent):
# Compute the ensemble models for the children first, so that they can be used for the parent model
Performances_parent = Performances_grandparent[
Performances_grandparent['version'].isin(
fnmatch.filter(Performances_grandparent['version'], version_parent))]
# if the last ensemble level has not been reached, go down one level and create a branch for each child.
# Otherwise the leaf has been reached
if len(list_ensemble_levels_parent) > 0:
list_ensemble_levels_child = list_ensemble_levels_parent.copy()
ensemble_level = list_ensemble_levels_child.pop()
list_children = Performances_parent[ensemble_level].unique()
for child in list_children:
parameters_child = parameters_parent.copy()
parameters_child[ensemble_level] = child
version_child = self._parameters_to_version(parameters_child)
# recursive call to the function
self._recursive_ensemble_builder(Performances_parent, parameters_child, version_child,
list_ensemble_levels_child)
else:
ensemble_level = None
# compute the ensemble model for the parent
# Check if ensemble model has already been computed. If it has, load the predictions. If it has not, compute it.
if not self.regenerate_models and \
os.path.exists(self.path_data + 'Predictions_' + self.pred_type + '_' + version_parent + '_test.csv'):
print('The model ' + version_parent + ' has already been computed. Loading it...')
for fold in self.folds:
df_single_ensemble = pd.read_csv(self.path_data + 'Predictions_' + self.pred_type + '_' +
version_parent + '_' + fold + '.csv')
df_single_ensemble.rename(columns={'pred': 'pred_' + version_parent}, inplace=True)
# Add the ensemble predictions to the dataframe
if fold == 'train':
self.PREDICTIONS[fold] = self.PREDICTIONS[fold].merge(df_single_ensemble, how='outer',
on=['id', 'outer_fold'])
else:
df_single_ensemble.drop(columns=['outer_fold'], inplace=True)
self.PREDICTIONS[fold] = self.PREDICTIONS[fold].merge(df_single_ensemble, how='outer', on=['id'])
# Add the extra ensemble models at the 'organ' level
if ensemble_level == 'organ':
if self.pred_type == 'instances':
instances = self.instancesS[self.pred_type]
else:
instances = ['01', '23']
for instances_names in instances:
pv = 'pred_' + version_parent.split('_')[0] + '_*instances' + instances_names + '_' + \
'_'.join(version_parent.split('_')[2:])
version_instances = version_parent.split('_')[0] + '_*instances' + instances_names + '_' + \
'_'.join(version_parent.split('_')[2:])
df_single_ensemble = pd.read_csv(self.path_data + 'Predictions_' + self.pred_type + '_' +
version_instances + '_' + fold + '.csv')
df_single_ensemble.rename(columns={'pred': pv}, inplace=True)
if fold == 'train':
self.PREDICTIONS[fold] = self.PREDICTIONS[fold].merge(df_single_ensemble, how='outer',
on=['id', 'outer_fold'])
else:
df_single_ensemble.drop(columns=['outer_fold'], inplace=True)
self.PREDICTIONS[fold] = self.PREDICTIONS[fold].merge(df_single_ensemble, how='outer',
on=['id'])
else:
self._build_single_ensemble_wrapper(version_parent, ensemble_level)
# Print a quick performance estimation
df_model = self.PREDICTIONS['test'][[self.target, 'pred_' + version_parent]].dropna()
print(self.main_metric_name + ': ' + str(r2_score(df_model[self.target], df_model['pred_' + version_parent])))
print('The sample size is ' + str(len(df_model.index)) + '.')
def generate_ensemble_predictions(self):
self._recursive_ensemble_builder(self.Performances, self.parameters, self.version, self.list_ensemble_levels)
# Reorder the columns alphabetically
for fold in self.folds:
pred_versions = [col for col in self.PREDICTIONS[fold].columns if 'pred_' in col]
pred_versions.sort()
self.PREDICTIONS[fold] = self.PREDICTIONS[fold][self.id_vars + self.demographic_vars + pred_versions]
# Displaying the R2s
for fold in self.folds:
versions = [col.replace('pred_', '') for col in self.PREDICTIONS[fold].columns if 'pred_' in col]
r2s = []
for version in versions:
df = self.PREDICTIONS[fold][[self.target, 'pred_' + version]].dropna()
r2s.append(r2_score(df[self.target], df['pred_' + version]))
R2S = pd.DataFrame({'version': versions, 'R2': r2s})
R2S.sort_values(by='R2', ascending=False, inplace=True)
print(fold + ' R2s for each model: ')
print(R2S)
def save_predictions(self):
for fold in self.folds:
self.PREDICTIONS[fold].to_csv(self.path_data + 'PREDICTIONS_withEnsembles_' + self.pred_type + '_' +
self.target + '_' + fold + '.csv', index=False)
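# Usage sketch for EnsemblesPredictions (illustrative):
#
#     ensembles = EnsemblesPredictions(target='Age', pred_type='instances', regenerate_models=False)
#     ensembles.load_data()
#     ensembles.generate_ensemble_predictions()
#     ensembles.save_predictions()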
class ResidualsGenerate(Basics):
"""
Computes accelerated aging phenotypes (Residuals, corrected for residuals bias with respect to age)
"""
def __init__(self, target=None, fold=None, pred_type=None, debug_mode=False):
# Parameters
Basics.__init__(self)
self.target = target
self.fold = fold
self.pred_type = pred_type
self.debug_mode = debug_mode
self.Residuals = pd.read_csv(self.path_data + 'PREDICTIONS_withEnsembles_' + pred_type + '_' + target + '_' +
fold + '.csv')
self.list_models = [col_name.replace('pred_', '') for col_name in self.Residuals.columns.values
if 'pred_' in col_name]
def generate_residuals(self):
list_models = [col_name.replace('pred_', '') for col_name in self.Residuals.columns.values
if 'pred_' in col_name]
for model in list_models:
print('Generating residuals for model ' + model)
df_model = self.Residuals[['Age', 'pred_' + model]]
no_na_indices = [not b for b in df_model['pred_' + model].isna()]
df_model.dropna(inplace=True)
if (len(df_model.index)) > 0:
age = df_model.loc[:, ['Age']]
res = df_model['Age'] - df_model['pred_' + model]
regr = LinearRegression()
regr.fit(age, res)
res_correction = regr.predict(age)
res_corrected = res - res_correction
self.Residuals.loc[no_na_indices, 'pred_' + model] = res_corrected
# debug plot
if self.debug_mode:
print('Bias for the residuals ' + model, regr.coef_)
plt.scatter(age, res)
plt.scatter(age, res_corrected)
regr2 = LinearRegression()
regr2.fit(age, res_corrected)
print('Coefficients after: \n', regr2.coef_)
self.Residuals.rename(columns=lambda x: x.replace('pred_', 'res_'), inplace=True)
def save_residuals(self):
self.Residuals.to_csv(self.path_data + 'RESIDUALS_' + self.pred_type + '_' + self.target + '_' + self.fold +
'.csv', index=False)
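# The residuals are defined as res = Age - pred and then de-biased by regressing res on Age and
# subtracting the fitted trend, so that the corrected residuals are uncorrelated with chronological age.
# Usage sketch (illustrative):
#
#     res_gen = ResidualsGenerate(target='Age', fold='test', pred_type='instances', debug_mode=False)
#     res_gen.generate_residuals()
#     res_gen.save_residuals()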
class ResidualsCorrelations(Basics):
"""
Computes the phenotypic correlation between aging dimensions.
"""
def __init__(self, target=None, fold=None, pred_type=None, debug_mode=False):
Basics.__init__(self)
self.target = target
self.fold = fold
self.pred_type = pred_type
self.debug_mode = debug_mode
if debug_mode:
self.n_bootstrap_iterations_correlations = 10
else:
self.n_bootstrap_iterations_correlations = 1000
self.Residuals = None
self.CORRELATIONS = {}
self.Correlation_sample_sizes = None
def preprocessing(self):
# load data
Residuals = pd.read_csv(self.path_data + 'RESIDUALS_' + self.pred_type + '_' + self.target + '_' + self.fold +
'.csv')
# Format the dataframe
Residuals_only = Residuals[[col_name for col_name in Residuals.columns.values if 'res_' in col_name]]
Residuals_only.rename(columns=lambda x: x.replace('res_' + self.target + '_', ''), inplace=True)
# Reorder the columns to make the correlation matrix more readable
# Need to temporarily rename '?' because its ranking differs from the '*' and ',' characters
Residuals_only.columns = [col_name.replace('?', ',placeholder') for col_name in Residuals_only.columns.values]
Residuals_only = Residuals_only.reindex(sorted(Residuals_only.columns), axis=1)
Residuals_only.columns = [col_name.replace(',placeholder', '?') for col_name in Residuals_only.columns.values]
self.Residuals = Residuals_only
def _bootstrap_correlations(self):
names = self.Residuals.columns.values
results = []
for i in range(self.n_bootstrap_iterations_correlations):
if (i + 1) % 100 == 0:
print('Bootstrap iteration ' + str(i + 1) + ' out of ' + str(self.n_bootstrap_iterations_correlations))
data_i = resample(self.Residuals, replace=True, n_samples=len(self.Residuals.index))
results.append(np.array(data_i.corr()))
results = np.array(results)
RESULTS = {}
for op in ['mean', 'std']:
results_op = pd.DataFrame(getattr(np, op)(results, axis=0))
results_op.index = names
results_op.columns = names
RESULTS[op] = results_op
self.CORRELATIONS['_sd'] = RESULTS['std']
def generate_correlations(self):
# Generate the correlation matrix
self.CORRELATIONS[''] = self.Residuals.corr()
        # Generate the std by bootstrapping
self._bootstrap_correlations()
# Merge both as a dataframe of strings
self.CORRELATIONS['_str'] = self.CORRELATIONS[''].round(3).applymap(str) \
+ '+-' + self.CORRELATIONS['_sd'].round(3).applymap(str)
# Print correlations
print(self.CORRELATIONS[''])
# Generate correlation sample sizes
self.Residuals[~self.Residuals.isna()] = 1
self.Residuals[self.Residuals.isna()] = 0
self.Correlation_sample_sizes = self.Residuals.transpose() @ self.Residuals
def save_correlations(self):
self.Correlation_sample_sizes.to_csv(self.path_data + 'ResidualsCorrelations_samplesizes_' + self.pred_type +
'_' + self.target + '_' + self.fold + '.csv', index=True)
for mode in self.modes:
self.CORRELATIONS[mode].to_csv(self.path_data + 'ResidualsCorrelations' + mode + '_' + self.pred_type +
'_' + self.target + '_' + self.fold + '.csv', index=True)
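# Usage sketch for ResidualsCorrelations (illustrative):
#
#     res_corr = ResidualsCorrelations(target='Age', fold='test', pred_type='instances', debug_mode=True)
#     res_corr.preprocessing()
#     res_corr.generate_correlations()
#     res_corr.save_correlations()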
class PerformancesSurvival(Metrics):
"""
Computes the performances in terms of survival prediction using biological age phenotypes as survival predictors.
"""
def __init__(self, target=None, fold=None, pred_type=None, debug_mode=None):
Metrics.__init__(self)
self.target = target
self.fold = fold
self.pred_type = pred_type
if debug_mode:
self.n_bootstrap_iterations = 3
else:
self.n_bootstrap_iterations = 1000
self.PERFORMANCES = None
self.Survival = None
self.SURV = None
def _bootstrap_c_index(self, data):
results = []
for i in range(self.n_bootstrap_iterations):
data_i = resample(data, replace=True, n_samples=len(data.index))
if len(data_i['Death'].unique()) == 2:
results.append(concordance_index(data_i['Age'], -data_i['pred'], data_i['Death']))
'''
To debug if this part fails again
try:
results.append(concordance_index(data_i['Age'], -data_i['pred'], data_i['Death']))
except:
print('WEIRD, should not happen! Printing the df')
print(data_i)
self.data_i_debug = data_i
break
'''
if len(results) > 0:
results_mean = np.mean(results)
results_std = np.std(results)
else:
results_mean = np.nan
results_std = np.nan
return results_mean, results_std
def load_data(self):
# Load and preprocess PERFORMANCES
self.PERFORMANCES = pd.read_csv(self.path_data + 'PERFORMANCES_withEnsembles_alphabetical_' +
self.pred_type + '_' + self.target + '_' + self.fold + '.csv')
self.PERFORMANCES.set_index('version', drop=False, inplace=True)
self.PERFORMANCES.index.name = 'index'
for inner_fold in ['all'] + [str(i) for i in range(10)]:
for metric in ['C-Index', 'C-Index-difference']:
for mode in self.modes:
self.PERFORMANCES[metric + mode + '_' + inner_fold] = np.nan
Residuals = pd.read_csv(
self.path_data + 'RESIDUALS_' + self.pred_type + '_' + self.target + '_' + self.fold + '.csv')
Survival = pd.read_csv(self.path_data + 'data_survival.csv')
self.Survival = pd.merge(Survival[['id', 'FollowUpTime', 'Death']], Residuals, on='id')
data_folds = pd.read_csv(self.path_data + 'data-features_eids.csv', usecols=['eid', 'outer_fold'])
self.SURV = {}
for i in range(10):
self.SURV[i] = \
self.Survival[self.Survival['eid'].isin(data_folds['eid'][data_folds['outer_fold'] == i].values)]
def compute_c_index_and_save_data(self):
models = [col.replace('res_' + self.target, self.target) for col in self.Survival.columns if 'res_' in col]
for k, model in enumerate(models):
if k % 30 == 0:
print('Computing CI for the ' + str(k) + 'th model out of ' + str(len(models)) + ' models.')
# Load Performances dataframes
PERFS = {}
for mode in self.modes:
PERFS[mode] = pd.read_csv('../data/Performances_' + self.pred_type + '_' + model + '_' + self.fold +
mode + '.csv')
PERFS[mode].set_index('outer_fold', drop=False, inplace=True)
PERFS[mode]['C-Index'] = np.nan
PERFS[mode]['C-Index-difference'] = np.nan
df_model = self.Survival[['FollowUpTime', 'Death', 'Age', 'res_' + model]].dropna()
df_model.rename(columns={'res_' + model: 'pred'}, inplace=True)
# Compute CI over all samples
if len(df_model['Death'].unique()) == 2:
ci_model = concordance_index(df_model['FollowUpTime'], -(df_model['Age'] - df_model['pred']),
df_model['Death'])
ci_age = concordance_index(df_model['FollowUpTime'], -df_model['Age'], df_model['Death'])
ci_diff = ci_model - ci_age
PERFS[''].loc['all', 'C-Index'] = ci_model
PERFS[''].loc['all', 'C-Index-difference'] = ci_diff
self.PERFORMANCES.loc[model, 'C-Index_all'] = ci_model
self.PERFORMANCES.loc[model, 'C-Index-difference_all'] = ci_diff
_, ci_sd = self._bootstrap_c_index(df_model)
PERFS['_sd'].loc['all', 'C-Index'] = ci_sd
PERFS['_sd'].loc['all', 'C-Index-difference'] = ci_sd
self.PERFORMANCES.loc[model, 'C-Index_sd_all'] = ci_sd
self.PERFORMANCES.loc[model, 'C-Index-difference_sd_all'] = ci_sd
# Compute CI over each fold
for i in range(10):
df_model_i = self.SURV[i][['FollowUpTime', 'Death', 'Age', 'res_' + model]].dropna()
df_model_i.rename(columns={'res_' + model: 'pred'}, inplace=True)
if len(df_model_i['Death'].unique()) == 2:
ci_model_i = concordance_index(df_model_i['FollowUpTime'],
-(df_model_i['Age'] - df_model_i['pred']),
df_model_i['Death'])
ci_age_i = concordance_index(df_model_i['FollowUpTime'], -df_model_i['Age'], df_model_i['Death'])
ci_diff_i = ci_model_i - ci_age_i
PERFS[''].loc[str(i), 'C-Index'] = ci_model_i
PERFS[''].loc[str(i), 'C-Index-difference'] = ci_diff_i
self.PERFORMANCES.loc[model, 'C-Index_' + str(i)] = ci_model_i
self.PERFORMANCES.loc[model, 'C-Index-difference_' + str(i)] = ci_diff_i
_, ci_i_sd = self._bootstrap_c_index(df_model_i)
PERFS['_sd'].loc[str(i), 'C-Index'] = ci_i_sd
PERFS['_sd'].loc[str(i), 'C-Index-difference'] = ci_i_sd
self.PERFORMANCES.loc[model, 'C-Index_sd_' + str(i)] = ci_i_sd
self.PERFORMANCES.loc[model, 'C-Index-difference_sd_' + str(i)] = ci_i_sd
# Compute sd using all folds
ci_str = round(PERFS[''][['C-Index', 'C-Index-difference']], 3).astype(str) + '+-' + \
round(PERFS['_sd'][['C-Index', 'C-Index-difference']], 3).astype(str)
PERFS['_str'][['C-Index', 'C-Index-difference']] = ci_str
for col in ['C-Index', 'C-Index-difference']:
cols = [col + '_str_' + str(i) for i in range(10)]
# Fill model's performance matrix
ci_std_lst = PERFS['_str'].loc['all', col].split('+-')
ci_std_lst.insert(1, str(round(PERFS[''][col].iloc[1:].std(), 3)))
ci_std_str = '+-'.join(ci_std_lst)
PERFS['_str'].loc['all', col] = ci_std_str
# Fill global performances matrix
self.PERFORMANCES.loc[model, cols] = ci_str[col].values[1:]
self.PERFORMANCES.loc[model, col + '_str_all'] = ci_std_str
# Save new performances
for mode in self.modes:
PERFS[mode].to_csv('../data/Performances_' + self.pred_type + '_withCI_' + model + '_' + self.fold +
mode + '.csv')
# Ranking, printing and saving
# Sort by alphabetical order
Performances_alphabetical = self.PERFORMANCES.sort_values(by='version')
Performances_alphabetical.to_csv(self.path_data + 'PERFORMANCES_withEnsembles_withCI_alphabetical_' +
self.pred_type + '_' + self.target + '_' + self.fold + '.csv', index=False)
# Sort by C-Index difference, to print
cols_to_print = ['version', 'C-Index-difference_str_all']
Performances_ranked = self.PERFORMANCES.sort_values(by='C-Index-difference_all', ascending=False)
print('Performances of the models ranked by C-Index difference with C-Index based on age only,'
' on all the samples:')
print(Performances_ranked[cols_to_print])
# Sort by main metric, to save
sort_by = self.dict_main_metrics_names[self.target] + '_all'
sort_ascending = self.main_metrics_modes[self.dict_main_metrics_names[self.target]] == 'min'
Performances_ranked = self.PERFORMANCES.sort_values(by=sort_by, ascending=sort_ascending)
        Performances_ranked.to_csv(self.path_data + 'PERFORMANCES_withEnsembles_withCI_ranked_' +
                                   self.pred_type + '_' + self.target + '_' + self.fold + '.csv', index=False)
        # Save the performances restricted to the non-ensemble models in separate files
models_nonensembles = [idx for idx in Performances_alphabetical.index if '*' not in idx]
path_save = self.path_data + 'PERFORMANCES_withoutEnsembles_withCI_alphabetical_' + self.pred_type + '_' + \
self.target + '_' + self.fold + '.csv'
Performances_alphabetical.loc[models_nonensembles, :].to_csv(path_save, index=False)
        Performances_ranked.loc[models_nonensembles, :].to_csv(path_save.replace('alphabetical', 'ranked'),
                                                               index=False)
def print_key_results(self):
# Helper function
def compute_p_value(row):
sd = float(row['C-Index-difference_str_all'].split('+-')[1])
z = np.abs(row['C-Index-difference_all']) / sd
pv = norm.sf(abs(z)) * 2
return pv
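        # The p-value treats the C-index difference as approximately normal: z = |difference| / sd,
        # where sd is the middle term of the 'value+-folds_sd+-bootstrap_sd' string (the fold-to-fold
        # standard deviation), and the two-sided p-value is obtained from the normal tail (norm.sf).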
# Preprocess the data
Performances = pd.read_csv(
self.path_data + 'PERFORMANCES_withEnsembles_withCI_alphabetical_' + self.pred_type + '_' +
self.target + '_' + self.fold + '.csv')
Performances.set_index('version', drop=False, inplace=True)
Perfs_CI = Performances[['version', 'C-Index_all', 'C-Index-difference_all',
'C-Index-difference_str_all']].sort_values(by='C-Index-difference_all')
Perfs_CI['C-Index_CA'] = Perfs_CI['C-Index_all'] - Perfs_CI['C-Index-difference_all']
Perfs_CI['p-value'] = Perfs_CI.apply(compute_p_value, axis=1)
# Select only models for which difference between biological age's CI and chronological age's CI is significant
Perfs_CI_significant = Perfs_CI[Perfs_CI['p-value'] < 0.05]
Perfs_CI_significant_FDR = Perfs_CI[Perfs_CI['p-value'] * len(Perfs_CI.index) < 0.05]
# Take the subset corresponding to the 11 main dimensions
main_dims = ['Brain', 'Eyes', 'Hearing', 'Lungs', 'Arterial', 'Heart', 'Abdomen', 'Musculoskeletal',
'PhysicalActivity', 'Biochemistry', 'ImmuneSystem']
main_rows = ['Age_' + dim + '_*' * 10 for dim in main_dims]
Perfs_CI_main = Perfs_CI.loc[main_rows, :]
Perfs_CI_main.sort_values(by='C-Index-difference_all', inplace=True)
# Select only models for which difference between biological age's CI and chronological age's CI is significant
Perfs_CI_main_significant = Perfs_CI_main[Perfs_CI_main['p-value'] < 0.05]
Perfs_CI_main_significant_FDR = Perfs_CI_main[Perfs_CI_main['p-value'] * len(Perfs_CI_main.index) < 0.05]
# Compute the statistics to compare biological ages and chronological age on all the dimensions
CI_diff_mean = Perfs_CI['C-Index-difference_all'].mean()
CI_diff_std = Perfs_CI['C-Index-difference_all'].std()
_t_stat_all, pv_all = ttest_rel(Perfs_CI['C-Index_all'], Perfs_CI['C-Index_CA'])
# Number of dimensions outperforming and underperforming compared to chronological age
n_CI_diff_positives = (Perfs_CI['C-Index-difference_all'] > 0).sum()
n_CI_diff_negatives = (Perfs_CI['C-Index-difference_all'] < 0).sum()
n_CI_diff_positives_significant = (Perfs_CI_significant['C-Index-difference_all'] > 0).sum()
n_CI_diff_negatives_significant = (Perfs_CI_significant['C-Index-difference_all'] < 0).sum()
n_CI_diff_positives_significant_FDR = (Perfs_CI_significant_FDR['C-Index-difference_all'] > 0).sum()
n_CI_diff_negatives_significant_FDR = (Perfs_CI_significant_FDR['C-Index-difference_all'] < 0).sum()
# print results
print('The mean CI difference over the ' + str(len(Perfs_CI.index)) + ' biological ages = ' +
str(round(CI_diff_mean, 3)) + '; standard deviation = ' + str(round(CI_diff_std, 3)) +
'; paired t-test p-value = ' + str(pv_all))
print('Out of the ' + str(len(Perfs_CI.index)) + ' dimensions, ' + str(n_CI_diff_positives) +
' dimensions outperform CA as survival predictors, and ' + str(n_CI_diff_negatives) +
' dimensions underperform.')
# Compute the statistics to compare biological ages and chronological age on the 11 main dimensions
CI_diff_main_mean = Perfs_CI_main['C-Index-difference_all'].mean()
CI_diff_main_std = Perfs_CI_main['C-Index-difference_all'].std()
_t_stat_main, pv_main = ttest_rel(Perfs_CI_main['C-Index_all'], Perfs_CI_main['C-Index_CA'])
# Number of dimensions outperforming and underperforming compared to chronological age
n_CI_diff_main_positives = (Perfs_CI_main['C-Index-difference_all'] > 0).sum()
n_CI_diff_main_negatives = (Perfs_CI_main['C-Index-difference_all'] < 0).sum()
n_CI_diff_main_positives_significant = (Perfs_CI_main_significant['C-Index-difference_all'] > 0).sum()
n_CI_diff_main_negatives_significant = (Perfs_CI_main_significant['C-Index-difference_all'] < 0).sum()
n_CI_diff_main_positives_significant_FDR = (Perfs_CI_main_significant_FDR['C-Index-difference_all'] > 0).sum()
n_CI_diff_main_negatives_significant_FDR = (Perfs_CI_main_significant_FDR['C-Index-difference_all'] < 0).sum()
# print results
print('The mean CI difference over the ' + str(len(Perfs_CI_main.index)) + ' biological ages = ' +
str(round(CI_diff_main_mean, 3)) + '; standard deviation = ' + str(round(CI_diff_main_std, 3)) +
'; paired t-test p-value = ' + str(pv_main))
print('Out of the ' + str(len(Perfs_CI_main.index)) + ' main biological dimensions, ' + str(
n_CI_diff_main_positives) +
' dimensions outperform CA as survival predictors, and ' + str(n_CI_diff_main_negatives) +
' dimensions underperform.')
Perfs_CI_main[['version', 'C-Index-difference_all',
'C-Index-difference_str_all', 'C-Index_all', 'C-Index_CA']].sort_values(
by='C-Index-difference_all')
row_names = ['All', 'significant', 'FDR_significant']
col_names = ['All', '+', '-']
n_models = pd.DataFrame(np.empty((len(row_names), len(col_names),)))
n_models.index = row_names
n_models.columns = col_names
N_MODELS = {'All_dims': n_models.copy(), 'Main_dims': n_models.copy()}
best_models = n_models.drop(columns=['All'])
BEST_MODELS = {'All_dims': best_models.copy(), 'Main_dims': best_models.copy()}
BEST_CI_DIFFS = {'All_dims': best_models.copy(), 'Main_dims': best_models.copy()}
N_MODELS['All_dims'].loc[:, '+'] = \
[n_CI_diff_positives, n_CI_diff_positives_significant, n_CI_diff_positives_significant_FDR]
BEST_MODELS['All_dims'].loc[:, '+'] = [Perfs_CI['version'][len(Perfs_CI.index) - 1],
Perfs_CI_significant['version'][len(Perfs_CI_significant.index) - 1],
Perfs_CI_significant_FDR['version'][
len(Perfs_CI_significant_FDR.index) - 1]]
BEST_CI_DIFFS['All_dims'].loc[:, '+'] = \
[Perfs_CI['C-Index-difference_str_all'][len(Perfs_CI.index) - 1],
Perfs_CI_significant['C-Index-difference_str_all'][len(Perfs_CI_significant.index) - 1],
Perfs_CI_significant_FDR['C-Index-difference_str_all'][len(Perfs_CI_significant_FDR.index) - 1]]
N_MODELS['All_dims'].loc[:, '-'] = \
[n_CI_diff_negatives, n_CI_diff_negatives_significant, n_CI_diff_negatives_significant_FDR]
BEST_MODELS['All_dims'].loc[:, '-'] = [Perfs_CI['version'][0],
Perfs_CI_significant['version'][0],
Perfs_CI_significant_FDR['version'][0]]
BEST_CI_DIFFS['All_dims'].loc[:, '-'] = [Perfs_CI['C-Index-difference_str_all'][0],
Perfs_CI_significant['C-Index-difference_str_all'][0],
Perfs_CI_significant_FDR['C-Index-difference_str_all'][0]]
N_MODELS['All_dims']['All'] = N_MODELS['All_dims']['+'] + N_MODELS['All_dims']['-']
N_MODELS['Main_dims'].loc[:, '+'] = \
[n_CI_diff_main_positives, n_CI_diff_main_positives_significant, n_CI_diff_main_positives_significant_FDR]
BEST_MODELS['Main_dims'].loc[:, '+'] = \
[Perfs_CI_main['version'][len(Perfs_CI_main.index) - 1],
Perfs_CI_main_significant['version'][len(Perfs_CI_main_significant.index) - 1],
Perfs_CI_main_significant_FDR['version'][len(Perfs_CI_main_significant_FDR.index) - 1]]
BEST_CI_DIFFS['Main_dims'].loc[:, '+'] = \
[Perfs_CI_main['C-Index-difference_str_all'][len(Perfs_CI_main.index) - 1],
Perfs_CI_main_significant['C-Index-difference_str_all'][len(Perfs_CI_main_significant.index) - 1],
Perfs_CI_main_significant_FDR['C-Index-difference_str_all'][len(Perfs_CI_main_significant_FDR.index) - 1]]
N_MODELS['Main_dims'].loc[:, '-'] = \
[n_CI_diff_main_negatives, n_CI_diff_main_negatives_significant, n_CI_diff_main_negatives_significant_FDR]
BEST_MODELS['Main_dims'].loc[:, '-'] = [Perfs_CI_main['version'][0],
Perfs_CI_main_significant['version'][0],
Perfs_CI_main_significant_FDR['version'][0]]
BEST_CI_DIFFS['Main_dims'].loc[:, '-'] = [Perfs_CI_main['C-Index-difference_str_all'][0],
Perfs_CI_main_significant['C-Index-difference_str_all'][0],
Perfs_CI_main_significant_FDR['C-Index-difference_str_all'][0]]
N_MODELS['Main_dims']['All'] = N_MODELS['Main_dims']['+'] + N_MODELS['Main_dims']['-']
# Reformat to take into account that sometimes no model fits the criteria
for dims in ['All_dims', 'Main_dims']:
for sign in ['+', '-']:
for models in ['All', 'significant', 'FDR_significant']:
if N_MODELS[dims].loc[models, sign] == 0:
BEST_MODELS[dims].loc[models, sign] = ''
BEST_CI_DIFFS[dims].loc[models, sign] = ''
# Print results
# All dims
print('Number of aging dimensions, best models and associated CI differences for All dims: ')
print(N_MODELS['All_dims'])
print(BEST_MODELS['All_dims'])
print(BEST_CI_DIFFS['All_dims'])
print('Best model between All dims: ')
print(Perfs_CI_significant_FDR[['C-Index-difference_str_all', 'C-Index_all', 'C-Index_CA']].iloc[-1, :])
# Main dims
print('Number of aging dimensions, best models and associated CI differences for Main dims: ')
print(N_MODELS['Main_dims'])
print(BEST_MODELS['Main_dims'])
print(BEST_CI_DIFFS['Main_dims'])
print('Best model between Main dims: ')
print(Perfs_CI_main_significant_FDR[['C-Index-difference_str_all', 'C-Index_all', 'C-Index_CA']].iloc[-1, :])
class SelectBest(Metrics):
"""
    For each main aging dimension and selected subdimensions, select the best-performing model.
"""
def __init__(self, target=None, pred_type=None):
Metrics.__init__(self)
self.target = target
self.pred_type = pred_type
self.folds = ['test']
self.organs_with_suborgans = {'Brain': ['Cognitive', 'MRI'], 'Eyes': ['All', 'Fundus', 'OCT'],
'Arterial': ['PulseWaveAnalysis', 'Carotids'],
'Heart': ['ECG', 'MRI'], 'Abdomen': ['Liver', 'Pancreas'],
'Musculoskeletal': ['Spine', 'Hips', 'Knees', 'FullBody', 'Scalars'],
'Biochemistry': ['Urine', 'Blood']}
self.organs = []
self.best_models = []
self.PREDICTIONS = {}
self.RESIDUALS = {}
self.PERFORMANCES = {}
self.CORRELATIONS = {}
self.CORRELATIONS_SAMPLESIZES = {}
def _load_data(self):
for fold in self.folds:
path_pred = self.path_data + 'PREDICTIONS_withEnsembles_' + self.pred_type + '_' + self.target + '_' + \
fold + '.csv'
path_res = self.path_data + 'RESIDUALS_' + self.pred_type + '_' + self.target + '_' + fold + '.csv'
path_perf = self.path_data + 'PERFORMANCES_withEnsembles_withCI_ranked_' + self.pred_type + '_' + \
self.target + '_' + fold + '.csv'
path_corr = self.path_data + 'ResidualsCorrelations_str_' + self.pred_type + '_' + self.target + '_' + \
fold + '.csv'
self.PREDICTIONS[fold] = pd.read_csv(path_pred)
self.RESIDUALS[fold] = pd.read_csv(path_res)
self.PERFORMANCES[fold] = pd.read_csv(path_perf)
self.PERFORMANCES[fold].set_index('version', drop=False, inplace=True)
self.CORRELATIONS_SAMPLESIZES[fold] = pd.read_csv(self.path_data + 'ResidualsCorrelations_samplesizes_' +
self.pred_type + '_' + self.target + '_' + fold + '.csv',
index_col=0)
self.CORRELATIONS[fold] = {}
for mode in self.modes:
self.CORRELATIONS[fold][mode] = pd.read_csv(path_corr.replace('_str', mode), index_col=0)
def _select_versions(self):
# Load val performances
path_perf = self.path_data + 'PERFORMANCES_withEnsembles_withCI_ranked_' + self.pred_type + '_' + \
self.target + '_test.csv'
Performances = pd.read_csv(path_perf)
Performances.set_index('version', drop=False, inplace=True)
list_organs = Performances['organ'].unique()
list_organs.sort()
for organ in list_organs:
print('Selecting best model for ' + organ)
Perf_organ = Performances[Performances['organ'] == organ]
self.organs.append(organ)
self.best_models.append(Perf_organ['version'].values[0])
if organ in self.organs_with_suborgans.keys():
for view in self.organs_with_suborgans[organ]:
print('Selecting best model for ' + organ + view)
Perf_organview = Performances[(Performances['organ'] == organ) & (Performances['view'] == view)]
self.organs.append(organ + view)
self.best_models.append(Perf_organview['version'].values[0])
def _take_subsets(self):
base_cols = self.id_vars + self.demographic_vars
best_models_pred = ['pred_' + model for model in self.best_models]
best_models_res = ['res_' + model for model in self.best_models]
best_models_corr = ['_'.join(model.split('_')[1:]) for model in self.best_models]
for fold in self.folds:
self.PREDICTIONS[fold] = self.PREDICTIONS[fold].loc[:, base_cols + best_models_pred]
self.PREDICTIONS[fold].columns = base_cols + self.organs
self.RESIDUALS[fold] = self.RESIDUALS[fold].loc[:, base_cols + best_models_res]
self.RESIDUALS[fold].columns = base_cols + self.organs
self.PERFORMANCES[fold] = self.PERFORMANCES[fold].loc[self.best_models, :]
self.PERFORMANCES[fold].index = self.organs
self.CORRELATIONS_SAMPLESIZES[fold] = \
self.CORRELATIONS_SAMPLESIZES[fold].loc[best_models_corr, best_models_corr]
self.CORRELATIONS_SAMPLESIZES[fold].index = self.organs
self.CORRELATIONS_SAMPLESIZES[fold].columns = self.organs
for mode in self.modes:
self.CORRELATIONS[fold][mode] = self.CORRELATIONS[fold][mode].loc[best_models_corr, best_models_corr]
self.CORRELATIONS[fold][mode].index = self.organs
self.CORRELATIONS[fold][mode].columns = self.organs
def select_models(self):
self._load_data()
self._select_versions()
self._take_subsets()
def save_data(self):
for fold in self.folds:
path_pred = self.path_data + 'PREDICTIONS_bestmodels_' + self.pred_type + '_' + self.target + '_' + fold \
+ '.csv'
path_res = self.path_data + 'RESIDUALS_bestmodels_' + self.pred_type + '_' + self.target + '_' + fold + \
'.csv'
path_corr = self.path_data + 'ResidualsCorrelations_bestmodels_str_' + self.pred_type + '_' + self.target \
+ '_' + fold + '.csv'
path_perf = self.path_data + 'PERFORMANCES_bestmodels_ranked_' + self.pred_type + '_' + self.target + '_' \
+ fold + '.csv'
self.PREDICTIONS[fold].to_csv(path_pred, index=False)
self.RESIDUALS[fold].to_csv(path_res, index=False)
self.PERFORMANCES[fold].sort_values(by=self.dict_main_metrics_names[self.target] + '_all', ascending=False,
inplace=True)
self.PERFORMANCES[fold].to_csv(path_perf, index=False)
Performances_alphabetical = self.PERFORMANCES[fold].sort_values(by='version')
Performances_alphabetical.to_csv(path_perf.replace('ranked', 'alphabetical'), index=False)
for mode in self.modes:
self.CORRELATIONS[fold][mode].to_csv(path_corr.replace('_str', mode), index=True)
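# Hedged usage sketch (not part of the original driver scripts): assuming the
# Metrics base class provides path_data, id_vars, demographic_vars and modes,
# SelectBest is meant to be run as a three-step pipeline.
def _example_select_best():
    selector = SelectBest(target='Age', pred_type='instances')
    selector.select_models()  # load data, pick the best version per dimension, subset the files
    selector.save_data()  # write the *_bestmodels_* predictions, residuals, performances and correlations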
# Handy draft to print some key results
Perfs = pd.read_csv('../data/PERFORMANCES_withEnsembles_alphabetical_instances_Age_test.csv')
Perfs.set_index('version', drop=False, inplace=True)
# Take the subset corresponding to the 11 main dimensions
main_dims = ['Brain', 'Eyes', 'Hearing', 'Lungs', 'Arterial', 'Heart', 'Abdomen', 'Musculoskeletal',
'PhysicalActivity',
'Biochemistry', 'ImmuneSystem']
main_rows = ['Age_' + dim + '_*' * 10 for dim in main_dims]
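# The '_*' * 10 suffix builds ensemble version names such as
# 'Age_Brain_*_*_*_*_*_*_*_*_*_*', i.e. the organ followed by wildcard values
# for the view, transformation, architecture and hyperparameter fields.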
Perfs_main = Perfs.loc[main_rows, :]
print('R-Squared for all dimensions = ' + str(round(Perfs['R-Squared_all'].mean(), 3)) + '; std = ' +
str(round(Perfs['R-Squared_all'].std(), 3)) + '; min = ' + str(round(Perfs['R-Squared_all'].min(), 3)) +
'; max = ' + str(round(Perfs['R-Squared_all'].max(), 3)))
print('RMSEs for all dimensions = ' + str(round(Perfs['RMSE_all'].mean(), 3)) + '; std = ' +
str(round(Perfs['RMSE_all'].std(), 3)) + '; min = ' + str(
round(Perfs['RMSE_all'].min(), 3)) + '; max = ' +
str(round(Perfs['RMSE_all'].max(), 3)))
print('R-Squared for main dimensions = ' + str(round(Perfs_main['R-Squared_all'].mean(), 3)) + '; std = ' +
str(round(Perfs_main['R-Squared_all'].std(), 3)) + '; min = ' + str(
round(Perfs_main['R-Squared_all'].min(), 3)) +
'; max = ' + str(round(Perfs_main['R-Squared_all'].max(), 3)))
print('RMSEs for main dimensions = ' + str(round(Perfs_main['RMSE_all'].mean(), 3)) + '; std = ' +
str(round(Perfs_main['RMSE_all'].std(), 3)) + '; min = ' + str(round(Perfs_main['RMSE_all'].min(), 3)) +
'; max = ' + str(round(Perfs_main['RMSE_all'].max(), 3)))
class SelectCorrelationsNAs(Basics):
"""
    Build a summary correlation matrix: when a correlation cannot be computed at the sample level ("instances")
    because the intersection is too small, fill the NA with the correlation computed at the participant level
    ("eids").
"""
def __init__(self, target=None):
Basics.__init__(self)
self.target = target
self.folds = ['test']
self.CORRELATIONS = {'*': {'': {}, '_sd': {}, '_str': {}}}
def load_data(self):
for models_type in self.models_types:
self.CORRELATIONS[models_type] = {}
for pred_type in ['instances', 'eids', '*']:
self.CORRELATIONS[models_type][pred_type] = {}
for mode in self.modes:
self.CORRELATIONS[models_type][pred_type][mode] = {}
for fold in self.folds:
if pred_type == '*':
self.CORRELATIONS[models_type][pred_type][mode][fold] = \
pd.read_csv(self.path_data + 'ResidualsCorrelations' + models_type + mode +
'_instances_' + self.target + '_' + fold + '.csv', index_col=0)
else:
self.CORRELATIONS[models_type][pred_type][mode][fold] = \
pd.read_csv(self.path_data + 'ResidualsCorrelations' + models_type + mode + '_' +
pred_type + '_' + self.target + '_' + fold + '.csv', index_col=0)
def fill_na(self):
        # Detect NAs in the instances correlation matrix
for models_type in self.models_types:
NAs_mask = self.CORRELATIONS[models_type]['instances']['']['test'].isna()
for mode in self.modes:
for fold in self.folds:
self.CORRELATIONS[models_type]['*'][mode][fold] = \
self.CORRELATIONS[models_type]['instances'][mode][fold].copy()
self.CORRELATIONS[models_type]['*'][mode][fold][NAs_mask] = \
self.CORRELATIONS[models_type]['eids'][mode][fold][NAs_mask]
def save_correlations(self):
for models_type in self.models_types:
for mode in self.modes:
for fold in self.folds:
self.CORRELATIONS[models_type]['*'][mode][fold].to_csv(self.path_data + 'ResidualsCorrelations' +
models_type + mode + '_*_' + self.target +
'_' + fold + '.csv', index=True)
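# Hedged usage sketch (assumption: Basics provides path_data, models_types and
# modes): replace the NAs of the instance-level correlation matrices with the
# participant-level ("eids") values, then save the merged '*' matrices.
def _example_fill_correlation_nas():
    filler = SelectCorrelationsNAs(target='Age')
    filler.load_data()
    filler.fill_na()
    filler.save_correlations()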
class CorrelationsAverages:
"""
    Computes average correlations at different levels to summarize the results.
"""
def __init__(self):
self.Performances = pd.read_csv("../data/PERFORMANCES_withEnsembles_ranked_eids_Age_test.csv")
self.Correlations = pd.read_csv("../data/ResidualsCorrelations_eids_Age_test.csv", index_col=0)
def _melt_correlation_matrix(self, models):
models = ['_'.join(c.split('_')[1:]) for c in models]
Corrs = self.Correlations.loc[models, models]
        Corrs = Corrs.where(np.triu(np.ones(Corrs.shape), 1).astype(bool))
Corrs = Corrs.stack().reset_index()
Corrs.columns = ['Row', 'Column', 'Correlation']
return Corrs
@staticmethod
def _split_version(row):
names = ['organ', 'view', 'transformation', 'architecture', 'n_fc_layers', 'n_fc_nodes', 'optimizer',
'learning_rate', 'weight_decay', 'dropout_rate', 'data_augmentation_factor']
row_names = ['row_' + name for name in names]
col_names = ['col_' + name for name in names]
row_params = row['Row'].split('_')
col_params = row['Column'].split('_')
new_row = pd.Series(row_params + col_params + [row['Correlation']])
new_row.index = row_names + col_names + ['Correlation']
return new_row
@staticmethod
def _compute_stats(data, title):
m = data['Correlation'].mean()
s = data['Correlation'].std()
n = len(data.index)
print('Correlation between ' + title + ': ' + str(round(m, 3)) + '+-' + str(round(s, 3)) + ', n_pairs=' +
str(n))
@staticmethod
def _generate_pairs(ls):
pairs = []
for i in range(len(ls)):
for j in range((i + 1), len(ls)):
pairs.append((ls[i], ls[j]))
return pairs
@staticmethod
def _extract_pair(Corrs, pair, level):
extracted = Corrs[((Corrs['row_' + level] == pair[0]) & (Corrs['col_' + level] == pair[1])) |
((Corrs['row_' + level] == pair[1]) & (Corrs['col_' + level] == pair[0]))]
return extracted
def _extract_pairs(self, Corrs, pairs, level):
extracted = None
for pair in pairs:
extracted_pair = self._extract_pair(Corrs, pair, level)
if extracted is None:
extracted = extracted_pair
else:
extracted = extracted.append(extracted_pair)
return extracted
def correlations_all(self):
Corrs = self._melt_correlation_matrix(self.Performances['version'].values)
self._compute_stats(Corrs, 'All models')
def correlations_dimensions(self):
Perf = self.Performances[(self.Performances['view'] == '*') &
~(self.Performances['organ'].isin(['*', '*instances01', '*instances1.5x',
'*instances23']))]
Corrs = self._melt_correlation_matrix(Perf['version'].values)
self._compute_stats(Corrs, 'Main Dimensions')
def correlations_subdimensions(self):
# Subdimensions
dict_dims_to_subdims = {
'Brain': ['Cognitive', 'MRI'],
'Eyes': ['OCT', 'Fundus', 'IntraocularPressure', 'Acuity', 'Autorefraction'],
'Arterial': ['PulseWaveAnalysis', 'Carotids'],
'Heart': ['ECG', 'MRI'],
'Abdomen': ['Liver', 'Pancreas'],
'Musculoskeletal': ['Spine', 'Hips', 'Knees', 'FullBody', 'Scalars'],
'PhysicalActivity': ['FullWeek', 'Walking'],
'Biochemistry': ['Urine', 'Blood']
}
Corrs_subdim = None
for dim in dict_dims_to_subdims.keys():
models = ['Age_' + dim + '_' + subdim + '_*' * 9 for subdim in dict_dims_to_subdims[dim]]
Corrs_dim = self._melt_correlation_matrix(models)
self._compute_stats(Corrs_dim, dim + ' subdimensions')
if Corrs_subdim is None:
Corrs_subdim = Corrs_dim
else:
Corrs_subdim = Corrs_subdim.append(Corrs_dim)
# Compute the average over the subdimensions
self._compute_stats(Corrs_subdim, 'Subdimensions')
def correlations_subsubdimensions(self):
        # Only select the ensemble models at the architecture level
Perf_ss = self.Performances[self.Performances['architecture'] == '*']
# Brain - Cognitive
Perf = Perf_ss[(Perf_ss['organ'] == 'Brain') & (Perf_ss['view'] == 'Cognitive')]
# Remove ensemble model and all scalars
Perf = Perf[~Perf['transformation'].isin(['*', 'AllScalars'])]
Corrs_bc = self._melt_correlation_matrix(Perf['version'].values)
self._compute_stats(Corrs_bc, 'Brain cognitive sub-subdimensions')
# Musculoskeletal - Scalars
Perf = Perf_ss[(Perf_ss['organ'] == 'Musculoskeletal') & (Perf_ss['view'] == 'Scalars')]
Perf = Perf[~Perf['transformation'].isin(['*', 'AllScalars'])]
Corrs_ms = self._melt_correlation_matrix(Perf['version'].values)
self._compute_stats(Corrs_ms, 'Musculoskeletal - Scalars sub-subdimensions')
# Average over subsubdimensions
Corrs_subsubdimensions = Corrs_bc.append(Corrs_ms)
self._compute_stats(Corrs_subsubdimensions, 'Sub-subdimensions')
def correlations_views(self):
# Variables
dict_dim_to_view = {
'Brain_MRI': [['SagittalReference', 'CoronalReference', 'TransverseReference', 'dMRIWeightedMeans',
'SubcorticalVolumes', 'GreyMatterVolumes'],
['SagittalRaw', 'CoronalRaw', 'TransverseRaw']],
'Arterial_PulseWaveAnalysis': [['Scalars', 'TimeSeries']],
'Arterial_Carotids': [['Scalars', 'LongAxis', 'CIMT120', 'CIMT150', 'ShortAxis']],
'Heart_ECG': [['Scalars', 'TimeSeries']],
'Heart_MRI': [['2chambersRaw', '3chambersRaw', '4chambersRaw'],
['2chambersContrast', '3chambersContrast', '4chambersContrast']],
'Musculoskeletal_Spine': [['Sagittal', 'Coronal']],
'Musculoskeletal_FullBody': [['Figure', 'Flesh']],
'PhysicalActivity_FullWeek': [
['Scalars', 'Acceleration', 'TimeSeriesFeatures', 'GramianAngularField1minDifference',
'GramianAngularField1minSummation', 'MarkovTransitionField1min',
'RecurrencePlots1min']]
}
Corrs_views = None
for dim in dict_dim_to_view.keys():
Corrs_dims = None
for i, views in enumerate(dict_dim_to_view[dim]):
models = ['Age_' + dim + '_' + view + '_*' * 8 for view in dict_dim_to_view[dim][i]]
Corrs_dim = self._melt_correlation_matrix(models)
if Corrs_dims is None:
Corrs_dims = Corrs_dim
else:
Corrs_dims = Corrs_dims.append(Corrs_dim)
self._compute_stats(Corrs_dims, dim + ' views')
if Corrs_views is None:
Corrs_views = Corrs_dims
else:
Corrs_views = Corrs_views.append(Corrs_dims)
# Compute the average over the views
self._compute_stats(Corrs_views, 'Views')
def correlations_transformations(self):
# Raw vs. Contrast (Heart MRI, Abdomen Liver, Abdomen Pancreas), Raw vs. Reference (Brain MRI),
        # Figure vs. Skeleton (Musculoskeletal FullBody)
# Filter out the models that are ensembles at the architecture level
models_to_keep = [model for model in self.Correlations.index.values if model.split('_')[3] != '*']
# Select only the models that are Heart MRI, Abdomen, or Brain MRI
models_to_keep = [model for model in models_to_keep if
((model.split('_')[0] == 'Abdomen') & (model.split('_')[1] in ['Liver', 'Pancreas'])) |
((model.split('_')[0] == 'Brain') & (model.split('_')[1] == 'MRI')) |
((model.split('_')[0] == 'Heart') & (model.split('_')[1] == 'MRI')) |
((model.split('_')[0] == 'Musculoskeletal') & (model.split('_')[1] == 'FullBody') &
(model.split('_')[2] in ['Figure', 'Skeleton']))]
# Select only the models that have the relevant preprocessing/transformations
models_to_keep = [model for model in models_to_keep if model.split('_')[2] in
['Raw', 'Contrast', '2chambersRaw', '2chambersContrast', '3chambersRaw', '3chambersContrast',
'4chambersRaw', '4chambersContrast', 'SagittalRaw', 'SagittalReference', 'CoronalRaw',
'CoronalReference', 'TransverseRaw', 'TransverseReference', 'Figure', 'Skeleton']]
# Select the corresponding rows and columns
Corrs = self.Correlations.loc[models_to_keep, models_to_keep]
# Melt correlation matrix to dataframe
        Corrs = Corrs.where(np.triu(np.ones(Corrs.shape), 1).astype(bool))
Corrs = Corrs.stack().reset_index()
Corrs.columns = ['Row', 'Column', 'Correlation']
Corrs = Corrs.apply(self._split_version, axis=1)
# Only keep the models that have the same organ, view and architecture
Corrs = Corrs[(Corrs['row_organ'] == Corrs['col_organ']) & (Corrs['row_view'] == Corrs['col_view']) &
(Corrs['row_architecture'] == Corrs['col_architecture'])]
# Define preprocessing pairs
dict_preprocessing = {
'Raw-Reference': [('SagittalRaw', 'SagittalReference'), ('CoronalRaw', 'CoronalReference'),
('TransverseRaw', 'TransverseReference')],
'Raw-Contrast': [('Raw', 'Contrast'), ('2chambersRaw', '2chambersContrast'),
('3chambersRaw', '3chambersContrast'), ('4chambersRaw', '4chambersContrast')],
'Figure-Skeleton': [('Figure', 'Skeleton')]
}
# Compute average correlation between each pair of transformations
Corrs_transformations = None
for comparison in dict_preprocessing.keys():
Corrs_comp = self._extract_pairs(Corrs, dict_preprocessing[comparison], 'transformation')
print(comparison)
print(Corrs_comp)
self._compute_stats(Corrs_comp, comparison)
if Corrs_transformations is None:
Corrs_transformations = Corrs_comp
else:
Corrs_transformations = Corrs_transformations.append(Corrs_comp)
# Compute average correlation between transformations
self._compute_stats(Corrs_transformations, 'Transformations')
def correlations_algorithms(self):
# Variables
algorithms_scalars = ['ElasticNet', 'LightGBM', 'NeuralNetwork']
algorithms_images = ['InceptionV3', 'InceptionResNetV2']
# Filter out the ensemble models (at the level of the algorithm)
models_to_keep = [model for model in self.Correlations.index.values if model.split('_')[3] != '*']
Corrs = self.Correlations.loc[models_to_keep, models_to_keep]
# Melt correlation matrix to dataframe
        Corrs = Corrs.where(np.triu(np.ones(Corrs.shape), 1).astype(bool))
Corrs = Corrs.stack().reset_index()
Corrs.columns = ['Row', 'Column', 'Correlation']
Corrs = Corrs.apply(self._split_version, axis=1)
# Select the rows for which everything is identical aside from the dataset
for name in ['organ', 'view', 'transformation']:
Corrs = Corrs[Corrs['row_' + name] == Corrs['col_' + name]]
# Compute average correlation between algorithms
self._compute_stats(Corrs, 'Algorithms')
algorithms_pairs = self._generate_pairs(algorithms_scalars) + self._generate_pairs(algorithms_images)
# Compute average correlation between each algorithm pair
for pair in algorithms_pairs:
Corrs_pair = self._extract_pair(Corrs, pair, 'architecture')
self._compute_stats(Corrs_pair, pair[0] + ' and ' + pair[1])
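# Hedged usage sketch: the summary statistics printed by CorrelationsAverages
# are meant to be generated level by level, from all models down to pairs of
# algorithms; each method prints its own averages.
def _example_correlations_averages():
    averages = CorrelationsAverages()
    averages.correlations_all()
    averages.correlations_dimensions()
    averages.correlations_subdimensions()
    averages.correlations_subsubdimensions()
    averages.correlations_views()
    averages.correlations_transformations()
    averages.correlations_algorithms()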
class AttentionMaps(DeepLearning):
"""
    Computes the attention maps (saliency maps and Grad-RAM maps) for all images.
"""
def __init__(self, target=None, organ=None, view=None, transformation=None, debug_mode=False):
# Partial initialization with placeholders to get access to parameters and functions
DeepLearning.__init__(self, 'Age', 'Abdomen', 'Liver', 'Raw', 'InceptionResNetV2', '1', '1024', 'Adam',
'0.0001', '0.1', '0.5', '1.0', False)
# Parameters
self.target = target
self.organ = organ
self.view = view
self.transformation = transformation
self.version = None
self.leftright = True if self.organ + '_' + self.view in self.left_right_organs_views else False
self.parameters = None
self.image_width = None
self.image_height = None
self.batch_size = None
self.N_samples_attentionmaps = 10 # needs to be > 1 for the script to work
if debug_mode:
self.N_samples_attentionmaps = 2
self.dir_images = '../images/' + organ + '/' + view + '/' + transformation + '/'
self.prediction_type = self.dict_prediction_types[target]
self.Residuals = None
self.df_to_plot = None
self.df_outer_fold = None
self.class_mode = None
self.image = None
self.generator = None
self.dict_architecture_to_last_conv_layer_name = \
{'VGG16': 'block5_conv3', 'VGG19': 'block5_conv4', 'MobileNet': 'conv_pw_13_relu',
'MobileNetV2': 'out_relu', 'DenseNet121': 'relu', 'DenseNet169': 'relu', 'DenseNet201': 'relu',
'NASNetMobile': 'activation_1136', 'NASNetLarge': 'activation_1396', 'Xception': 'block14_sepconv2_act',
'InceptionV3': 'mixed10', 'InceptionResNetV2': 'conv_7b_ac', 'EfficientNetB7': 'top_activation'}
self.last_conv_layer = None
self.organs_views_transformations_images = \
['Brain_MRI_SagittalRaw', 'Brain_MRI_SagittalReference', 'Brain_MRI_CoronalRaw',
'Brain_MRI_CoronalReference', 'Brain_MRI_TransverseRaw', 'Brain_MRI_TransverseReference',
'Eyes_Fundus_Raw', 'Eyes_OCT_Raw', 'Arterial_Carotids_Mixed', 'Arterial_Carotids_LongAxis',
'Arterial_Carotids_CIMT120', 'Arterial_Carotids_CIMT150', 'Arterial_Carotids_ShortAxis',
'Heart_MRI_2chambersRaw', 'Heart_MRI_2chambersContrast', 'Heart_MRI_3chambersRaw',
'Heart_MRI_3chambersContrast', 'Heart_MRI_4chambersRaw', 'Heart_MRI_4chambersContrast',
'Abdomen_Liver_Raw', 'Abdomen_Liver_Contrast', 'Abdomen_Pancreas_Raw', 'Abdomen_Pancreas_Contrast',
'Musculoskeletal_Spine_Sagittal', 'Musculoskeletal_Spine_Coronal', 'Musculoskeletal_Hips_MRI',
'Musculoskeletal_Knees_MRI', 'Musculoskeletal_FullBody_Mixed', 'Musculoskeletal_FullBody_Figure',
'Musculoskeletal_FullBody_Skeleton', 'Musculoskeletal_FullBody_Flesh',
'PhysicalActivity_FullWeek_GramianAngularField1minDifference',
'PhysicalActivity_FullWeek_GramianAngularField1minSummation',
'PhysicalActivity_FullWeek_MarkovTransitionField1min', 'PhysicalActivity_FullWeek_RecurrencePlots1min']
def _select_best_model(self):
# Pick the best model based on the performances
path_perf = self.path_data + 'PERFORMANCES_withoutEnsembles_ranked_instances_' + self.target + '_test.csv'
Performances = pd.read_csv(path_perf).set_index('version', drop=False)
Performances = Performances[(Performances['organ'] == self.organ)
& (Performances['view'] == self.view)
& (Performances['transformation'] == self.transformation)]
self.version = Performances['version'].values[0]
del Performances
# other parameters
self.parameters = self._version_to_parameters(self.version)
if self.organ + '_' + self.view + '_' + self.transformation in self.organs_views_transformations_images:
DeepLearning.__init__(self, self.parameters['target'], self.parameters['organ'], self.parameters['view'],
self.parameters['transformation'], self.parameters['architecture'],
self.parameters['n_fc_layers'], self.parameters['n_fc_nodes'],
self.parameters['optimizer'], self.parameters['learning_rate'],
self.parameters['weight_decay'], self.parameters['dropout_rate'],
self.parameters['data_augmentation_factor'], False)
def _format_residuals(self):
# Format the residuals
Residuals_full = pd.read_csv(self.path_data + 'RESIDUALS_instances_' + self.target + '_test.csv')
Residuals = Residuals_full[['id', 'outer_fold'] + self.demographic_vars + ['res_' + self.version]]
del Residuals_full
Residuals.dropna(inplace=True)
Residuals.rename(columns={'res_' + self.version: 'res'}, inplace=True)
Residuals.set_index('id', drop=False, inplace=True)
Residuals['outer_fold'] = Residuals['outer_fold'].astype(int).astype(str)
Residuals['res_abs'] = Residuals['res'].abs()
self.Residuals = Residuals
def _select_representative_samples(self):
        # Select which samples to plot
print('Selecting representative samples...')
df_to_plot = None
# Sex
dict_sexes_to_values = {'Male': 1, 'Female': 0}
for sex in ['Male', 'Female']:
print('Sex: ' + sex)
Residuals_sex = self.Residuals[self.Residuals['Sex'] == dict_sexes_to_values[sex]]
Residuals_sex['sex'] = sex
# Age category
for age_category in ['young', 'middle', 'old']:
print('Age category: ' + age_category)
if age_category == 'young':
Residuals_age = Residuals_sex[Residuals_sex['Age'] <= Residuals_sex['Age'].min() + 10]
elif age_category == 'middle':
Residuals_age = Residuals_sex[(Residuals_sex['Age'] - Residuals_sex['Age'].median()).abs() < 5]
else:
Residuals_age = Residuals_sex[Residuals_sex['Age'] >= Residuals_sex['Age'].max() - 10]
Residuals_age['age_category'] = age_category
# Aging rate
for aging_rate in ['accelerated', 'normal', 'decelerated']:
print('Aging rate: ' + aging_rate)
Residuals_ar = Residuals_age
if aging_rate == 'accelerated':
Residuals_ar.sort_values(by='res', ascending=True, inplace=True)
elif aging_rate == 'decelerated':
Residuals_ar.sort_values(by='res', ascending=False, inplace=True)
else:
Residuals_ar.sort_values(by='res_abs', ascending=True, inplace=True)
Residuals_ar['aging_rate'] = aging_rate
Residuals_ar = Residuals_ar.iloc[:self.N_samples_attentionmaps, ]
Residuals_ar['sample'] = range(len(Residuals_ar.index))
if df_to_plot is None:
df_to_plot = Residuals_ar
else:
df_to_plot = df_to_plot.append(Residuals_ar)
# Postprocessing
df_to_plot['Biological_Age'] = df_to_plot['Age'] - df_to_plot['res']
activations_path = '../figures/Attention_Maps/' + self.target + '/' + self.organ + '/' + self.view + '/' + \
self.transformation + '/' + df_to_plot['sex'] + '/' + df_to_plot['age_category'] + '/' + \
df_to_plot['aging_rate']
file_names = '/imagetypeplaceholder_' + self.target + '_' + self.organ + '_' + self.view + '_' + \
self.transformation + '_' + df_to_plot['sex'] + '_' + df_to_plot['age_category'] + '_' + \
df_to_plot['aging_rate'] + '_' + df_to_plot['sample'].astype(str)
if self.leftright:
activations_path += '/sideplaceholder'
file_names += '_sideplaceholder'
df_to_plot['save_title'] = activations_path + file_names
path_save = self.path_data + 'AttentionMaps-samples_' + self.target + '_' + self.organ + '_' + self.view + \
'_' + self.transformation + '.csv'
df_to_plot.to_csv(path_save, index=False)
self.df_to_plot = df_to_plot
def preprocessing(self):
self._select_best_model()
self._format_residuals()
self._select_representative_samples()
def _preprocess_for_outer_fold(self, outer_fold):
self.df_outer_fold = self.df_to_plot[self.df_to_plot['outer_fold'] == outer_fold]
self.n_images = len(self.df_outer_fold.index)
if self.leftright:
self.n_images *= 2
# Generate the data generator(s)
self.n_images_batch = self.n_images // self.batch_size * self.batch_size
self.n_samples_batch = self.n_images_batch // 2 if self.leftright else self.n_images_batch
self.df_batch = self.df_outer_fold.iloc[:self.n_samples_batch, :]
if self.n_images_batch > 0:
self.generator_batch = \
MyImageDataGenerator(target=self.target, organ=self.organ, view=self.view,
data_features=self.df_batch, n_samples_per_subepoch=None,
batch_size=self.batch_size, training_mode=False,
side_predictors=self.side_predictors, dir_images=self.dir_images,
images_width=self.image_width, images_height=self.image_height,
data_augmentation=False, data_augmentation_factor=None, seed=self.seed)
else:
self.generator_batch = None
self.n_samples_leftovers = self.n_images % self.batch_size
self.df_leftovers = self.df_outer_fold.iloc[self.n_samples_batch:, :]
if self.n_samples_leftovers > 0:
self.generator_leftovers = \
MyImageDataGenerator(target=self.target, organ=self.organ, view=self.view,
data_features=self.df_leftovers, n_samples_per_subepoch=None,
batch_size=self.n_samples_leftovers, training_mode=False,
side_predictors=self.side_predictors, dir_images=self.dir_images,
images_width=self.image_width, images_height=self.image_height,
data_augmentation=False, data_augmentation_factor=None, seed=self.seed)
else:
self.generator_leftovers = None
        # load the weights for the fold (for test images in fold i, load the model trained on outer fold (i-1) % n_CV_outer_folds)
outer_fold_model = str((int(outer_fold) - 1) % self.n_CV_outer_folds)
self.model.load_weights(self.path_data + 'model-weights_' + self.version + '_' + outer_fold_model + '.h5')
@staticmethod
def _process_saliency(saliency):
saliency *= 255 / np.max(np.abs(saliency))
saliency = saliency.astype(int)
r_ch = saliency.copy()
r_ch[r_ch < 0] = 0
b_ch = -saliency.copy()
b_ch[b_ch < 0] = 0
g_ch = saliency.copy() * 0
a_ch = np.maximum(b_ch, r_ch)
saliency = np.dstack((r_ch, g_ch, b_ch, a_ch))
return saliency
@staticmethod
def _process_gradcam(gradcam):
# rescale to 0-255
gradcam = np.maximum(gradcam, 0) / np.max(gradcam)
gradcam = np.uint8(255 * gradcam)
# Convert to rgb
jet = cm.get_cmap("jet")
jet_colors = jet(np.arange(256))[:, :3]
jet_gradcam = jet_colors[gradcam]
jet_gradcam = array_to_img(jet_gradcam)
jet_gradcam = jet_gradcam.resize((gradcam.shape[1], gradcam.shape[0]))
jet_gradcam = img_to_array(jet_gradcam)
return jet_gradcam
def _generate_maps_for_one_batch(self, df, Xs, y):
# Generate saliency
saliencies = get_gradients_of_activations(self.model, Xs, y, layer_name='input_1')['input_1'].sum(axis=3)
        # Generate gradcam
weights = get_gradients_of_activations(self.model, Xs, y, layer_name=self.last_conv_layer,
)[self.last_conv_layer]
weights = weights.mean(axis=(1, 2))
weights /= np.abs(weights.max()) + 1e-7 # for numerical stability
activations = get_activations(self.model, Xs, layer_name=self.last_conv_layer)[self.last_conv_layer]
        # We must take the absolute value because for Grad-RAM, unlike for Grad-CAM, we care about both + and - effects
gradcams = np.abs(np.einsum('il,ijkl->ijk', weights, activations))
zoom_factor = [1] + list(np.array(Xs[0].shape[1:3]) / np.array(gradcams.shape[1:]))
gradcams = zoom(gradcams, zoom_factor)
# Save single images and filters
for j in range(len(y)):
# select sample
if self.leftright:
idx = j // 2
side = 'right' if j % 2 == 0 else 'left'
else:
idx = j
side = None
path = df['save_title'].values[idx]
ID = df['id'].values[idx]
# create directory tree if necessary
if self.leftright:
path = path.replace('sideplaceholder', side)
path_dir = '/'.join(path.split('/')[:-1])
if not os.path.exists(path_dir):
os.makedirs(path_dir)
# Save raw image
            # Compute the path to check whether the image existed in the first place
path_image = '../images/' + self.organ + '/' + self.view + '/' + self.transformation + '/'
if self.leftright:
path_image += side + '/'
path_image += ID + '.jpg'
if not os.path.exists(path_image):
print('No image found at ' + path_image + ', skipping.')
continue
img = load_img(path_image, target_size=(saliencies.shape[1], saliencies.shape[2]))
img.save(path.replace('imagetypeplaceholder', 'RawImage') + '.jpg')
# Save saliency
saliency = saliencies[j, :, :]
saliency = self._process_saliency(saliency)
np.save(path.replace('imagetypeplaceholder', 'Saliency') + '.npy', saliency)
# Save gradcam
gradcam = gradcams[j, :, :]
gradcam = self._process_gradcam(gradcam)
np.save(path.replace('imagetypeplaceholder', 'Gradcam') + '.npy', gradcam)
def generate_filters(self):
if self.organ + '_' + self.view + '_' + self.transformation in self.organs_views_transformations_images:
self._generate_architecture()
self.model.compile(optimizer=self.optimizers[self.optimizer](lr=self.learning_rate, clipnorm=1.0),
loss=self.loss_function, metrics=self.metrics)
self.last_conv_layer = self.dict_architecture_to_last_conv_layer_name[self.parameters['architecture']]
for outer_fold in self.outer_folds:
print('Generate attention maps for outer_fold ' + outer_fold)
gc.collect()
self._preprocess_for_outer_fold(outer_fold)
n_samples_per_batch = self.batch_size // 2 if self.leftright else self.batch_size
for i in range(self.n_images // self.batch_size):
print('Generating maps for batch ' + str(i))
Xs, y = self.generator_batch.__getitem__(i)
df = self.df_batch.iloc[n_samples_per_batch * i: n_samples_per_batch * (i + 1), :]
self._generate_maps_for_one_batch(df, Xs, y)
if self.n_samples_leftovers > 0:
print('Generating maps for leftovers')
Xs, y = self.generator_leftovers.__getitem__(0)
self._generate_maps_for_one_batch(self.df_leftovers, Xs, y)
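# Hedged usage sketch (assumption: DeepLearning supplies the model architecture,
# generators and outer_folds): preprocessing selects representative samples,
# generate_filters then computes and saves the saliency and Grad-RAM maps.
def _example_attention_maps():
    maps = AttentionMaps(target='Age', organ='Abdomen', view='Liver',
                         transformation='Raw', debug_mode=True)
    maps.preprocessing()
    maps.generate_filters()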
class GWASPreprocessing(Basics):
"""
Preprocesses the data for the GWASs.
"""
def __init__(self, target=None):
Basics.__init__(self)
self.target = target
self.fam = None
self.Residuals = None
self.covars = None
self.data = None
self.list_organs = None
self.IIDs_organs = {}
self.IIDs_organ_pairs = {}
def _generate_fam_file(self):
fam = pd.read_csv('/n/groups/patel/uk_biobank/project_52887_genetics/ukb52887_cal_chr1_v2_s488264.fam',
header=None, sep=' ')
fam.columns = ['FID', 'IID', 'father', 'mother', 'Sex', 'phenotype']
fam['phenotype'] = 1
fam.to_csv(self.path_data + 'GWAS.fam', index=False, header=False, sep=' ')
fam.to_csv(self.path_data + 'GWAS_exhaustive_placeholder.tab', index=False, sep='\t')
self.fam = fam
def _preprocess_residuals(self):
# Load residuals
Residuals = pd.read_csv(self.path_data + 'RESIDUALS_bestmodels_eids_' + self.target + '_test.csv')
Residuals['id'] = Residuals['eid']
Residuals.rename(columns={'id': 'FID', 'eid': 'IID'}, inplace=True)
Residuals = Residuals[Residuals['Ethnicity.White'] == 1]
cols_to_drop = ['instance', 'outer_fold', 'Sex'] + \
[col for col in Residuals.columns.values if 'Ethnicity.' in col]
Residuals.drop(columns=cols_to_drop, inplace=True)
self.Residuals = Residuals
self.list_organs = [col for col in self.Residuals.columns.values if col not in ['FID', 'IID', 'Age']]
def _preprocess_covars(self):
# Load covars
covar_cols = ['eid', '22001-0.0', '21000-0.0', '54-0.0', '22000-0.0'] + ['22009-0.' + str(i) for i in
range(1, 41)]
covars = pd.read_csv('/n/groups/patel/uk_biobank/project_52887_41230/ukb41230.csv', usecols=covar_cols)
dict_rename = {'eid': 'IID', '22001-0.0': 'Sex', '21000-0.0': 'Ethnicity', '54-0.0': 'Assessment_center',
'22000-0.0': 'Genotyping_batch'}
for i in range(1, 41):
dict_rename.update(dict.fromkeys(['22009-0.' + str(i)], 'PC' + str(i)))
covars.rename(columns=dict_rename, inplace=True)
covars.dropna(inplace=True)
        covars.loc[covars['Sex'] == 0, 'Sex'] = 2
covars['Sex'] = covars['Sex'].astype(int)
        # remove non-white samples as suggested in BOLT-LMM_v2.3.4_manual.pdf p18
covars = covars[covars['Ethnicity'].isin([1, 1001, 1002, 1003])]
self.covars = covars
def _merge_main_data(self):
# Merge both dataframes
self.data = self.covars.merge(self.Residuals, on=['IID'])
reordered_cols = ['FID', 'IID', 'Assessment_center', 'Genotyping_batch', 'Age', 'Sex', 'Ethnicity'] + \
['PC' + str(i) for i in range(1, 41)] + self.list_organs
self.data = self.data[reordered_cols]
print('Preparing data for heritabilities')
for organ in self.list_organs:
print('Preparing data for ' + organ)
data_organ = self.data.copy()
cols_to_drop = [organ2 for organ2 in self.list_organs if organ2 != organ]
data_organ.drop(columns=cols_to_drop, inplace=True)
data_organ.dropna(inplace=True)
data_organ.to_csv(self.path_data + 'GWAS_data_' + self.target + '_' + organ + '.tab', index=False,
sep='\t')
self.IIDs_organs[organ] = data_organ['IID'].values
def _preprocessing_genetic_correlations(self):
print('Preparing data for genetic correlations')
organs_pairs = pd.DataFrame(columns=['organ1', 'organ2'])
for counter, organ1 in enumerate(self.list_organs):
for organ2 in self.list_organs[(counter + 1):]:
print('Preparing data for the organ pair ' + organ1 + ' and ' + organ2)
# Generate GWAS dataframe
organs_pairs = organs_pairs.append({'organ1': organ1, 'organ2': organ2}, ignore_index=True)
data_organ_pair = self.data.copy()
cols_to_drop = [organ3 for organ3 in self.list_organs if organ3 not in [organ1, organ2]]
data_organ_pair.drop(columns=cols_to_drop, inplace=True)
data_organ_pair.dropna(inplace=True)
data_organ_pair.to_csv(self.path_data + 'GWAS_data_' + self.target + '_' + organ1 + '_' + organ2 +
'.tab', index=False, sep='\t')
self.IIDs_organ_pairs[organ1 + '_' + organ2] = data_organ_pair['IID'].values
organs_pairs.to_csv(self.path_data + 'GWAS_genetic_correlations_pairs_' + self.target + '.csv', header=False,
index=False)
def _list_removed(self):
# samples to remove for each organ
print('Listing samples to remove for each organ')
for organ in self.list_organs:
print('Preparing samples to remove for organ ' + organ)
remove_organ = self.fam[['FID', 'IID']].copy()
            remove_organ = remove_organ[~remove_organ['IID'].isin(self.IIDs_organs[organ])]
remove_organ.to_csv(self.path_data + 'GWAS_remove_' + self.target + '_' + organ + '.tab', index=False,
header=False, sep=' ')
# samples to remove for each organ pair
print('Listing samples to remove for each organ pair')
for counter, organ1 in enumerate(self.list_organs):
for organ2 in self.list_organs[(counter + 1):]:
print('Preparing samples to remove for organ pair ' + organ1 + ' and ' + organ2)
remove_organ_pair = self.fam[['FID', 'IID']].copy()
                remove_organ_pair = \
                    remove_organ_pair[~remove_organ_pair['IID'].isin(self.IIDs_organ_pairs[organ1 + '_' + organ2])]
remove_organ_pair.to_csv(self.path_data + 'GWAS_remove_' + self.target + '_' + organ1 + '_' + organ2 +
'.tab', index=False, header=False, sep=' ')
def compute_gwas_inputs(self):
self._generate_fam_file()
self._preprocess_residuals()
self._preprocess_covars()
self._merge_main_data()
self._preprocessing_genetic_correlations()
self._list_removed()
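# Hedged usage sketch: compute_gwas_inputs chains the private preprocessing
# steps and writes the .fam, .tab and GWAS_remove_* files consumed downstream
# by the BOLT-LMM GWAS and heritability scripts.
def _example_gwas_preprocessing():
    prep = GWASPreprocessing(target='Age')
    prep.compute_gwas_inputs()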
class GWASPostprocessing(Basics):
"""
    Postprocesses the GWAS results and stores them in summary files.
"""
def __init__(self, target=None):
Basics.__init__(self)
self.target = target
self.organ = None
self.GWAS = None
        self.FDR_correction = 5e-8  # genome-wide significance threshold used to select hits
def _processing(self):
self.GWAS = pd.read_csv(self.path_data + 'GWAS_' + self.target + '_' + self.organ + '_X.stats', sep='\t')
GWAS_autosome = pd.read_csv(self.path_data + 'GWAS_' + self.target + '_' + self.organ + '_autosome.stats',
sep='\t')
self.GWAS[self.GWAS['CHR'] != 23] = GWAS_autosome
self.GWAS_hits = self.GWAS[self.GWAS['P_BOLT_LMM_INF'] < self.FDR_correction]
def _save_data(self):
self.GWAS.to_csv(self.path_data + 'GWAS_' + self.target + '_' + self.organ + '.csv', index=False)
self.GWAS_hits.to_csv(self.path_data + 'GWAS_hits_' + self.target + '_' + self.organ + '.csv', index=False)
def _merge_all_hits(self):
print('Merging all the GWAS results into a model called All...')
# Summarize all the significant SNPs
files = [file for file in glob.glob(self.path_data + 'GWAS_hits*')
if ('All' not in file) & ('_withGenes' not in file)]
All_hits = None
print(files)
for file in files:
print(file)
hits_organ = pd.read_csv(file)[
['SNP', 'CHR', 'BP', 'GENPOS', 'ALLELE1', 'ALLELE0', 'A1FREQ', 'F_MISS', 'CHISQ_LINREG',
'P_LINREG', 'BETA', 'SE', 'CHISQ_BOLT_LMM_INF', 'P_BOLT_LMM_INF']]
hits_organ['organ'] = '.'.join(file.split('_')[-1].split('.')[:-1])
if All_hits is None:
All_hits = hits_organ
else:
All_hits = pd.concat([All_hits, hits_organ])
All_hits.sort_values(by=['CHR', 'BP'], inplace=True)
All_hits.to_csv(self.path_data + 'GWAS_hits_' + self.target + '_All.csv', index=False)
def processing_all_organs(self):
if not os.path.exists('../figures/GWAS/'):
os.makedirs('../figures/GWAS/')
for organ in self.organs_XWAS:
if os.path.exists(self.path_data + 'GWAS_' + self.target + '_' + organ + '_X.stats') & \
os.path.exists(self.path_data + 'GWAS_' + self.target + '_' + organ + '_autosome.stats'):
print('Processing data for organ ' + organ)
self.organ = organ
self._processing()
self._save_data()
self._merge_all_hits()
@staticmethod
def _grep(pattern, path):
for line in open(path, 'r'):
if line.find(pattern) > -1:
return True
return False
@staticmethod
def _melt_correlation_matrix(Correlations, models):
Corrs = Correlations.loc[models, models]
        Corrs = Corrs.where(np.triu(np.ones(Corrs.shape), 1).astype(bool))
Corrs = Corrs.stack().reset_index()
Corrs.columns = ['Row', 'Column', 'Correlation']
return Corrs
@staticmethod
def _compute_stats(data, title):
m = data['Correlation'].mean()
s = data['Correlation'].std()
n = len(data.index)
print('Correlation between ' + title + ': ' + str(round(m, 3)) + '+-' + str(round(s, 3)) +
', n_pairs=' + str(n))
def parse_heritability_scores(self):
# Generate empty dataframe
Heritabilities = np.empty((len(self.organs_XWAS), 3,))
Heritabilities.fill(np.nan)
Heritabilities = pd.DataFrame(Heritabilities)
Heritabilities.index = self.organs_XWAS
Heritabilities.columns = ['Organ', 'h2', 'h2_sd']
# Fill the dataframe
for organ in self.organs_XWAS:
path = '../eo/MI09C_reml_' + self.target + '_' + organ + '_X.out'
if os.path.exists(path) and self._grep("h2g", path):
for line in open('../eo/MI09C_reml_' + self.target + '_' + organ + '_X.out', 'r'):
if line.find('h2g (1,1): ') > -1:
h2 = float(line.split()[2])
h2_sd = float(line.split()[-1][1:-2])
Heritabilities.loc[organ, :] = [organ, h2, h2_sd]
# Print and save results
print('Heritabilities:')
print(Heritabilities)
Heritabilities.to_csv(self.path_data + 'GWAS_heritabilities_' + self.target + '.csv', index=False)
def parse_genetic_correlations(self):
# Generate empty dataframe
Genetic_correlations = np.empty((len(self.organs_XWAS), len(self.organs_XWAS),))
Genetic_correlations.fill(np.nan)
Genetic_correlations = pd.DataFrame(Genetic_correlations)
Genetic_correlations.index = self.organs_XWAS
Genetic_correlations.columns = self.organs_XWAS
Genetic_correlations_sd = Genetic_correlations.copy()
Genetic_correlations_str = Genetic_correlations.copy()
# Fill the dataframe
for counter, organ1 in enumerate(self.organs_XWAS):
for organ2 in self.organs_XWAS[(counter + 1):]:
if os.path.exists('../eo/MI09D_' + self.target + '_' + organ1 + '_' + organ2 + '.out'):
for line in open('../eo/MI09D_' + self.target + '_' + organ1 + '_' + organ2 + '.out', 'r'):
if line.find('gen corr (1,2):') > -1:
corr = float(line.split()[3])
corr_sd = float(line.split()[-1][1:-2])
corr_str = "{:.3f}".format(corr) + '+-' + "{:.3f}".format(corr_sd)
Genetic_correlations.loc[organ1, organ2] = corr
Genetic_correlations.loc[organ2, organ1] = corr
Genetic_correlations_sd.loc[organ1, organ2] = corr_sd
Genetic_correlations_sd.loc[organ2, organ1] = corr_sd
Genetic_correlations_str.loc[organ1, organ2] = corr_str
Genetic_correlations_str.loc[organ2, organ1] = corr_str
# Print and save the results
print('Genetic correlations:')
print(Genetic_correlations)
Genetic_correlations.to_csv(self.path_data + 'GWAS_correlations_' + self.target + '.csv')
Genetic_correlations_sd.to_csv(self.path_data + 'GWAS_correlations_sd_' + self.target + '.csv')
Genetic_correlations_str.to_csv(self.path_data + 'GWAS_correlations_str_' + self.target + '.csv')
# Save sample size for the GWAS correlations
Correlations_sample_sizes = Genetic_correlations.copy()
        Correlations_sample_sizes = Correlations_sample_sizes * np.nan
dimensions = Correlations_sample_sizes.columns.values
for i1, dim1 in enumerate(dimensions):
for i2, dim2 in enumerate(dimensions[i1:]):
# Find the sample size
path = '../data/GWAS_data_Age_' + dim1 + '_' + dim2 + '.tab'
if os.path.exists(path):
ss = len(pd.read_csv(path, sep='\t').index)
Correlations_sample_sizes.loc[dim1, dim2] = ss
Correlations_sample_sizes.loc[dim2, dim1] = ss
Correlations_sample_sizes.to_csv(self.path_data + 'GWAS_correlations_sample_sizes_' + self.target + '.csv')
# Print correlations between main dimensions
main_dims = ['Abdomen', 'Musculoskeletal', 'Lungs', 'Eyes', 'Heart', 'Arterial', 'Brain', 'Biochemistry',
'Hearing', 'ImmuneSystem', 'PhysicalActivity']
        Corrs_main = self._melt_correlation_matrix(Genetic_correlations, main_dims)
        Corrs_main_sd = self._melt_correlation_matrix(Genetic_correlations_sd, main_dims)
Corrs_main['Correlation_sd'] = Corrs_main_sd['Correlation']
        Corrs_main['Correlation_str'] = Corrs_main['Correlation'].map('{:.3f}'.format) + '+-' + \
            Corrs_main['Correlation_sd'].map('{:.3f}'.format)
# Fill the table with sample sizes
sample_sizes = []
to_remove_ss = []
for i, row in Corrs_main.iterrows():
# Fill the sample size
sample_size = Correlations_sample_sizes.loc[row['Row'], row['Column']]
if sample_size <= 15000:
to_remove_ss.append(i)
sample_sizes.append(sample_size)
Corrs_main['Sample_size'] = sample_sizes
self._compute_stats(Corrs_main, 'all pairs')
self._compute_stats(Corrs_main.drop(index=to_remove_ss), 'after filtering sample sizes <= 15000')
# Print correlations between subdimensions
pairs_all = \
[['BrainMRI', 'BrainCognitive'], ['EyesOCT', 'EyesFundus'], ['HeartECG', 'HeartMRI'],
['AbdomenLiver', 'AbdomenPancreas'], ['BiochemistryBlood', 'BiochemistryUrine'],
['MusculoskeletalScalars', 'MusculoskeletalFullBody'], ['MusculoskeletalScalars', 'MusculoskeletalSpine'],
['MusculoskeletalScalars', 'MusculoskeletalHips'], ['MusculoskeletalScalars', 'MusculoskeletalKnees'],
['MusculoskeletalFullBody', 'MusculoskeletalSpine'], ['MusculoskeletalFullBody', 'MusculoskeletalHips'],
['MusculoskeletalFullBody', 'MusculoskeletalKnees'], ['MusculoskeletalSpine', 'MusculoskeletalHips'],
['MusculoskeletalSpine', 'MusculoskeletalKnees'], ['MusculoskeletalHips', 'MusculoskeletalKnees']]
pairs_musculo = \
[['MusculoskeletalScalars', 'MusculoskeletalFullBody'], ['MusculoskeletalScalars', 'MusculoskeletalSpine'],
['MusculoskeletalScalars', 'MusculoskeletalHips'], ['MusculoskeletalScalars', 'MusculoskeletalKnees']]
pairs_musculo_images = \
[['MusculoskeletalFullBody', 'MusculoskeletalSpine'], ['MusculoskeletalFullBody', 'MusculoskeletalHips'],
['MusculoskeletalFullBody', 'MusculoskeletalKnees'], ['MusculoskeletalSpine', 'MusculoskeletalHips'],
['MusculoskeletalSpine', 'MusculoskeletalKnees'], ['MusculoskeletalHips', 'MusculoskeletalKnees']]
PAIRS = {'all subdimensions': pairs_all, 'musculo scalars vs others': pairs_musculo,
'musculo-no scalars': pairs_musculo_images}
        for key, pairs in PAIRS.items():
print(key)
cors_pairs = []
for pair in pairs:
                cor = Genetic_correlations.loc[pair[0], pair[1]]
                cor_sd = Genetic_correlations_sd.loc[pair[0], pair[1]]
ss = Correlations_sample_sizes.loc[pair[0], pair[1]]
cors_pairs.append(cor)
print('Correlation between ' + pair[0] + ' and ' + pair[1] + ' = ' + str(round(cor, 3)) + '+-' +
str(round(cor_sd, 3)) + '; sample size = ' + str(ss))
print('Mean correlation for ' + key + ' = ' + str(round(np.mean(cors_pairs), 3)) + '+-' +
str(round(np.std(cors_pairs), 3)) + ', number of pairs = ' + str(len(pairs)))
@staticmethod
def compare_phenotypic_correlation_with_genetic_correlations():
Phenotypic_correlations = pd.read_csv('../data/ResidualsCorrelations_bestmodels_eids_Age_test.csv', index_col=0)
Phenotypic_correlations_sd = pd.read_csv('../data/ResidualsCorrelations_bestmodels_sd_eids_Age_test.csv',
index_col=0)
        Genetic_correlations = pd.read_csv('../data/GWAS_correlations_Age.csv', index_col=0)