| prompt (stringlengths 19–1.03M) | completion (stringlengths 4–2.12k) | api (stringlengths 8–90) |
---|---|---|
import pandas as pd
import pytest
import featuretools as ft
from featuretools.entityset import EntitySet
from featuretools.utils.gen_utils import import_or_none
ks = import_or_none('databricks.koalas')
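# `import_or_none` returns None when databricks.koalas is not installed, so the
# string condition 'not ks' below makes pytest skip the test in that case.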
@pytest.mark.skipif('not ks')
def test_single_table_ks_entityset():
primitives_list = ['absolute', 'is_weekend', 'year', 'day', 'num_characters', 'num_words']
ks_es = EntitySet(id="ks_es")
df = pd.DataFrame({"id": [0, 1, 2, 3],
"values": [1, 12, -34, 27],
"dates": [pd.to_datetime('2019-01-10'),
pd.to_datetime('2019-02-03'),
pd.to_datetime('2019-01-01'),
pd.to_datetime('2017-08-25')],
"strings": ["I am a string",
"23",
"abcdef ghijk",
""]})
values_dd = ks.from_pandas(df)
vtypes = {
"id": ft.variable_types.Id,
"values": ft.variable_types.Numeric,
"dates": ft.variable_types.Datetime,
"strings": ft.variable_types.NaturalLanguage
}
ks_es.entity_from_dataframe(entity_id="data",
dataframe=values_dd,
index="id",
variable_types=vtypes)
ks_fm, _ = ft.dfs(entityset=ks_es,
target_entity="data",
trans_primitives=primitives_list)
pd_es = ft.EntitySet(id="pd_es")
pd_es.entity_from_dataframe(entity_id="data",
dataframe=df,
index="id",
variable_types={"strings": ft.variable_types.NaturalLanguage})
fm, _ = ft.dfs(entityset=pd_es,
target_entity="data",
trans_primitives=primitives_list)
ks_computed_fm = ks_fm.to_pandas().set_index('id').loc[fm.index][fm.columns]
# NUM_WORDS(strings) is int32 in koalas for some reason
| pd.testing.assert_frame_equal(fm, ks_computed_fm, check_dtype=False) | pandas.testing.assert_frame_equal |
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 6 17:20:54 2017
@author: thuzhang
"""
# ARIMA
from statsmodels.tsa.arima_model import ARMA
from statsmodels.tsa.arima_model import ARMAResults
import numpy as np
import statsmodels.tsa.stattools as ts
import statsmodels.api as sm
from matplotlib import pyplot as plt
import pandas as pd
from DeNoise import Denoise
from ParseData import ParseData
from statsmodels.tsa.seasonal import seasonal_decompose
def Sigmoid(X,A,B,r):
return A*B*np.exp(r*X)/(A+B*(np.exp(r*X)-1))
StartTime=48
LengthOfData=720
LengthOfPredict=24
OriginData=(ParseData(File='7576.csv',Sts=StartTime,Length=LengthOfData+LengthOfPredict+1))
SigmaUp=np.average(OriginData)+2*np.sqrt(np.var(OriginData))
SigmaDown=np.average(OriginData)-2*np.sqrt(np.var(OriginData))
MeanData=np.average(OriginData)
RelativeValue=np.log(SigmaUp/SigmaDown-1)/MeanData
print(np.average(OriginData),np.sqrt(np.var(OriginData)))
def Logistic(x):
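# Logistic growth curve: saturates at SigmaUp (mean + 2*std), passes through
# SigmaDown (mean - 2*std) at x = 0, with growth rate RelativeValue.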
return Sigmoid(x,SigmaUp,SigmaDown,RelativeValue)
def test_stationarity(timeseries):
# Determine rolling statistics
rolmean = pd.rolling_mean(timeseries, window=100) # moving average over the window
rol_weighted_mean = pd.ewma(timeseries, span=36) # exponentially weighted moving average over the span
rolstd = | pd.rolling_std(timeseries, window=100) | pandas.rolling_std |
# -*- coding: utf-8 -*-
import io
import os
import pickle
import math
import numpy as np
import pandas as pd
import lightgbm as lgb
from tqdm.auto import tqdm
class ScoringService(object):
# End dates of the training period
TRAIN_END_HIGH = '2019-12-01'
TRAIN_END_LOW = '2018-12-31'
TRAIN_END = '2018-12-31'
# Start date of the validation period
VAL_START = '2019-02-01'
# End date of the validation period
VAL_END = '2019-12-01'
# Start date of the test period
TEST_START = '2020-01-01'
# Target variables
TARGET_LABELS = ['label_high_20', 'label_low_20']
# Loaded data is stored in this variable
dfs = None
# Trained models are stored in this variable
models = None
# Target stock codes are stored in this variable
codes = None
@classmethod
def getCodes(cls):
return cls.codes
@classmethod
def get_inputs(cls, dataset_dir):
"""
Args:
dataset_dir (str) : path to dataset directory
Returns:
dict[str]: path to dataset files
"""
inputs = {
'stock_list': f'{dataset_dir}/stock_list.csv.gz',
'stock_price': f'{dataset_dir}/stock_price.csv.gz',
'stock_fin': f'{dataset_dir}/stock_fin.csv.gz',
# 'stock_fin_price': f'{dataset_dir}/stock_fin_price.csv.gz',
'stock_labels': f'{dataset_dir}/stock_labels.csv.gz',
}
return inputs
@classmethod
def get_dataset(cls, inputs):
"""
Args:
inputs (dict[str]): paths to dataset files
Returns:
dict[pd.DataFrame]: loaded data
"""
if cls.dfs is None:
cls.dfs = {}
for k, v in inputs.items():
cls.dfs[k] = pd.read_csv(v)
# Set the DataFrame index.
if k == "stock_price":
cls.dfs[k].loc[:, "datetime"] = pd.to_datetime(cls.dfs[k].loc[:, "EndOfDayQuote Date"])
cls.dfs[k].set_index("datetime", inplace=True)
elif k in ["stock_fin", "stock_fin_price", "stock_labels"]:
cls.dfs[k].loc[:, "datetime"] = pd.to_datetime(cls.dfs[k].loc[:, "base_date"])
cls.dfs[k].set_index("datetime", inplace=True)
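# Indexing by datetime lets later steps slice rows with date strings,
# e.g. the date-range splits in get_features_and_label().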
return cls.dfs
@classmethod
def get_codes(cls, dfs):
"""
Args:
dfs (dict[pd.DataFrame]): loaded data
Returns:
array: list of stock codes
"""
stock_list = dfs['stock_list'].copy()
# Get the stock codes subject to prediction
cls.codes = stock_list[stock_list['prediction_target'] == True]['Local Code'].values
@classmethod
def get_features_and_label(cls, dfs, codes, feature, label):
"""
Args:
dfs (dict[pd.DataFrame]): loaded data
codes (array) : target codes
feature (pd.DataFrame): features
label (str) : label column name
Returns:
train_X (pd.DataFrame): training data
train_y (pd.DataFrame): label for train_X
val_X (pd.DataFrame): validation data
val_y (pd.DataFrame): label for val_X
test_X (pd.DataFrame): test data
test_y (pd.DataFrame): label for test_X
"""
# Define containers for the split data
trains_X, vals_X, tests_X = [], [], []
trains_y, vals_y, tests_y = [], [], []
# Build features for each stock code
print(label,' Create Feature value')
for code in tqdm(codes):
# Get the features
feats = feature[feature['code'] == code]
# Restrict to data for this stock code
stock_labels = dfs['stock_labels'][dfs['stock_labels']['Local Code'] == code].copy()
# Restrict to the given target variable
labels = stock_labels[label]
# Drop NaNs
labels.dropna(inplace=True)
if feats.shape[0] > 0 and labels.shape[0] > 0:
# Align the indices of the features and the target
labels = labels.loc[labels.index.isin(feats.index)]
feats = feats.loc[feats.index.isin(labels.index)]
labels.index = feats.index
# Split the data
_train_X = {}
_val_X = {}
_test_X = {}
_train_y = {}
_val_y = {}
_test_y = {}
if label == 'label_high_20':
_train_X = feats[: cls.TRAIN_END_HIGH].copy()
_val_X = feats[cls.VAL_START : cls.VAL_END].copy()
_test_X = feats[cls.TEST_START :].copy()
_train_y = labels[: cls.TRAIN_END_HIGH].copy()
_val_y = labels[cls.VAL_START : cls.VAL_END].copy()
_test_y = labels[cls.TEST_START :].copy()
elif label == 'label_low_20':
_train_X = feats[: cls.TRAIN_END_LOW].copy()
_val_X = feats[cls.VAL_START : cls.VAL_END].copy()
_test_X = feats[cls.TEST_START :].copy()
_train_y = labels[: cls.TRAIN_END_LOW].copy()
_val_y = labels[cls.VAL_START : cls.VAL_END].copy()
_test_y = labels[cls.TEST_START :].copy()
else:
_train_X = feats[: cls.TRAIN_END].copy()
_val_X = feats[cls.VAL_START : cls.VAL_END].copy()
_test_X = feats[cls.TEST_START :].copy()
_train_y = labels[: cls.TRAIN_END].copy()
_val_y = labels[cls.VAL_START : cls.VAL_END].copy()
_test_y = labels[cls.TEST_START :].copy()
# Store in lists (to be concatenated later)
trains_X.append(_train_X)
vals_X.append(_val_X)
tests_X.append(_test_X)
trains_y.append(_train_y)
vals_y.append(_val_y)
tests_y.append(_test_y)
# Concatenate the per-stock feature data.
train_X = pd.concat(trains_X)
val_X = pd.concat(vals_X)
test_X = pd.concat(tests_X)
# Concatenate the per-stock target data.
train_y = pd.concat(trains_y)
val_y = pd.concat(vals_y)
test_y = pd.concat(tests_y)
return train_X, train_y, val_X, val_y, test_X, test_y
# Compute the rate of increase
@classmethod
def get_Rate_of_increase(cls, df):
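# Period-over-period growth rate; equivalent to df.pct_change() when the
# series has no missing values.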
df_return_1 = df.shift(1)
return (df - df_return_1) / df_return_1
@classmethod
def get_features_for_predict(cls, dfs, code, label, start_dt='2016-01-01'):
"""
Args:
dfs (dict) : dict of pd.DataFrame include stock_fin, stock_price
code (int) : A local code for a listed company
start_dt (str): specify date range
Returns:
feature DataFrame (pd.DataFrame)
"""
# Feature generation uses the past 60 business days of data, so features are
# built starting 90 business days (weekends excluded, with buffer) before the prediction date
n = 90
# Restrict to data for this stock code
fin_data = dfs['stock_fin'][dfs['stock_fin']['Local Code'] == code]
# Restrict to the feature-generation period
fin_data = fin_data.loc[pd.Timestamp(start_dt) - pd.offsets.BDay(n) :]
# Get the data
fin_feats = fin_data[['Result_FinancialStatement FiscalYear']].copy()
fin_feats['Result_FinancialStatement NetSales'] = fin_data['Result_FinancialStatement NetSales']
fin_feats['Result_FinancialStatement OperatingIncome'] = fin_data['Result_FinancialStatement OperatingIncome']
fin_feats['Result_FinancialStatement OrdinaryIncome'] = fin_data['Result_FinancialStatement OrdinaryIncome']
fin_feats['Result_FinancialStatement NetIncome'] = fin_data['Result_FinancialStatement NetIncome']
fin_feats['Result_FinancialStatement TotalAssets'] = fin_data['Result_FinancialStatement TotalAssets']
fin_feats['Result_FinancialStatement NetAssets'] = fin_data['Result_FinancialStatement NetAssets']
fin_feats['Result_FinancialStatement CashFlowsFromOperatingActivities'] = fin_data['Result_FinancialStatement CashFlowsFromOperatingActivities']
fin_feats['Result_FinancialStatement CashFlowsFromFinancingActivities'] = fin_data['Result_FinancialStatement CashFlowsFromFinancingActivities']
fin_feats['Result_FinancialStatement CashFlowsFromInvestingActivities'] = fin_data['Result_FinancialStatement CashFlowsFromInvestingActivities']
fin_feats['Forecast_FinancialStatement FiscalYear'] = fin_data['Forecast_FinancialStatement FiscalYear']
fin_feats['Forecast_FinancialStatement NetSales'] = fin_data['Forecast_FinancialStatement NetSales']
fin_feats['Forecast_FinancialStatement OperatingIncome'] = fin_data['Forecast_FinancialStatement OperatingIncome']
fin_feats['Forecast_FinancialStatement OrdinaryIncome'] = fin_data['Forecast_FinancialStatement OrdinaryIncome']
fin_feats['Forecast_FinancialStatement NetIncome'] = fin_data['Forecast_FinancialStatement NetIncome']
fin_feats['Result_Dividend FiscalYear'] = fin_data['Result_Dividend FiscalYear']
fin_feats['Result_Dividend QuarterlyDividendPerShare'] = fin_data['Result_Dividend QuarterlyDividendPerShare']
fin_feats['Forecast_Dividend FiscalYear'] = fin_data['Forecast_Dividend FiscalYear']
fin_feats['Forecast_Dividend QuarterlyDividendPerShare'] = fin_data['Forecast_Dividend QuarterlyDividendPerShare']
fin_feats['Forecast_Dividend AnnualDividendPerShare'] = fin_data['Forecast_Dividend AnnualDividendPerShare']
fin_feats['Result_FinancialStatement ReportType'] = fin_data['Result_FinancialStatement ReportType']
fin_feats['Result_FinancialStatement ReportType'].replace(['Q1','Q2','Q3','Annual',],[0,1,2,3],inplace=True)
# Handle missing values
fin_feats = fin_feats.fillna(0)
# Restrict to data for this stock code
price_data = dfs['stock_price'][dfs['stock_price']['Local Code'] == code]
# Restrict to the feature-generation period
price_data = price_data.loc[pd.Timestamp(start_dt) - pd.offsets.BDay(n) :]
# Keep only the closing price
feats = price_data[['EndOfDayQuote ExchangeOfficialClose']].copy()
# Difference between the high and the low
price_data['Stock price difference'] = price_data['EndOfDayQuote High'] - price_data['EndOfDayQuote Low']
# Price change: difference between the previous close and the latest traded price
feats['EndOfDayQuote ChangeFromPreviousClose'] = price_data['EndOfDayQuote ChangeFromPreviousClose']
# Rise/fall price (previous close plus the change)
feats['EndOfDayQuote RisingAndFallingPrices'] = price_data['EndOfDayQuote PreviousClose'] + price_data['EndOfDayQuote ChangeFromPreviousClose']
# Cumulative adjustment factor
feats['EndOfDayQuote CumulativeAdjustmentFactor'] = price_data['EndOfDayQuote CumulativeAdjustmentFactor']
# Prices and volume from 0, 5, 10, 15 and 20 days ago
for nn in range(0, 21, 5):
nn_str = str(nn)
# High
feats['EndOfDayQuote High Return' + nn_str] = price_data['EndOfDayQuote High'].shift(nn)
# Low
feats['EndOfDayQuote Low Return' + nn_str] = price_data['EndOfDayQuote Low'].shift(nn)
# Open
feats['EndOfDayQuote Open Return' + nn_str] = price_data['EndOfDayQuote Open'].shift(nn)
# Close
feats['EndOfDayQuote Close Return' + nn_str] = price_data['EndOfDayQuote Close'].shift(nn)
# Volume
feats['EndOfDayQuote Volume Return' + nn_str] = price_data['EndOfDayQuote Volume'].shift(nn)
# Stock list information
list_data = dfs['stock_list'][dfs['stock_list']['Local Code'] == code].copy()
# 33-sector classification (code) of the stock
feats['33 Sector(Code)'] = list_data['33 Sector(Code)'].values[0]
# 17-sector classification (code) of the stock
feats['17 Sector(Code)'] = list_data['17 Sector(Code)'].values[0]
# Number of issued shares
feats['IssuedShareEquityQuote IssuedShare'] = list_data['IssuedShareEquityQuote IssuedShare'].values[0]
#Size Code (New Index Series)
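# Entries recorded as '-' (no size code) are mapped to 0 so the column can be cast to int.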
list_data['Size Code (New Index Series)'] = list_data['Size Code (New Index Series)'].replace('-', 0).astype(int)
million = 1000000
# Forecast EPS (earnings per share) for the next period
forecast_EPS = (fin_feats['Forecast_FinancialStatement NetIncome'] * million) / feats['IssuedShareEquityQuote IssuedShare']
#feats['Forecast EPS'] = forecast_EPS
# Forecast PER (price-earnings ratio) for the next period
feats['Forecast PER ExchangeOfficialClose'] = price_data['EndOfDayQuote ExchangeOfficialClose'] / forecast_EPS
# Volume-weighted average price (VWAP)
feats['EndOfDayQuote VWAP'] = price_data['EndOfDayQuote VWAP']
# Align the indices of the financial features and the market features
feats = feats.loc[feats.index.isin(fin_feats.index)]
fin_feats = fin_feats.loc[fin_feats.index.isin(feats.index)]
# Join the data
feats = pd.concat([feats, fin_feats], axis=1).dropna()
# Split by financial report type
#Q1
q1 = feats.loc[feats['Result_FinancialStatement ReportType'] == 0].copy()
#Q2
q2 = feats.loc[feats['Result_FinancialStatement ReportType'] == 1].copy()
#Q3
q3 = feats.loc[feats['Result_FinancialStatement ReportType'] == 2].copy()
#Annual
annual = feats.loc[feats['Result_FinancialStatement ReportType'] == 3].copy()
# Financial results (settlement) data
settlement = fin_data[['Forecast_FinancialStatement ReportType']].copy()
settlement['Forecast_FinancialStatement ReportType'].replace(['Q1','Q2','Q3','Annual',],[0,1,2,3],inplace=True)
settlement['Forecast_FinancialStatement FiscalYear'] = fin_data['Forecast_FinancialStatement FiscalYear']
settlement['Forecast_FinancialStatement NetSales'] = fin_data['Forecast_FinancialStatement NetSales']
settlement['Forecast_FinancialStatement OperatingIncome'] = fin_data['Forecast_FinancialStatement OperatingIncome']
settlement['Result_FinancialStatement OperatingIncome'] = fin_data['Result_FinancialStatement OperatingIncome']
# True if the value is the same as in the previous row
settlement['Forecast_FinancialStatement ReportType Flag'] = settlement['Forecast_FinancialStatement ReportType'].eq(settlement['Forecast_FinancialStatement ReportType'].shift(1))
settlement['Forecast_FinancialStatement FiscalYear Flag'] = settlement['Forecast_FinancialStatement FiscalYear'].eq(settlement['Forecast_FinancialStatement FiscalYear'].shift(1))
# Convert to 0/1
settlement['Forecast_FinancialStatement ReportType Flag'] = settlement['Forecast_FinancialStatement ReportType Flag'] * 1
settlement['Forecast_FinancialStatement FiscalYear Flag'] = settlement['Forecast_FinancialStatement FiscalYear Flag'] * 1
# Set the execution flag
settlement['Execution flag'] = ((settlement['Forecast_FinancialStatement ReportType Flag'] == 1) & (settlement['Forecast_FinancialStatement FiscalYear Flag'] == 1))
# Store the previous value where the execution flag is True
settlement['Forecast_FinancialStatement NetSales Shift'] = 0
settlement['Forecast_FinancialStatement NetSales Shift'].where(settlement['Execution flag'] != True, settlement['Forecast_FinancialStatement NetSales'].shift(1), inplace=True)
settlement['Forecast_FinancialStatement OperatingIncome Shift'] = 0
settlement['Forecast_FinancialStatement OperatingIncome Shift'].where(settlement['Execution flag'] != True, settlement['Forecast_FinancialStatement OperatingIncome'].shift(1), inplace=True)
settlement['Result_FinancialStatement OperatingIncome Shift'] = 0
settlement['Result_FinancialStatement OperatingIncome Shift'].where(settlement['Execution flag'] != True, settlement['Result_FinancialStatement OperatingIncome'].shift(1), inplace=True)
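# The *_Shift columns hold the previous filing's value only when both the forecast
# report type and fiscal year are unchanged from the previous row (Execution flag
# is True); otherwise they stay 0. The increase-rate features below are computed
# against these shifted values.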
# Liabilities
liabilities = feats['Result_FinancialStatement TotalAssets'] - feats['Result_FinancialStatement NetAssets']
# Annual EPS (earnings per share)
annual_EPS = (annual['Result_FinancialStatement NetIncome'] * million) / list_data['IssuedShareEquityQuote IssuedShare'].values[0]
if label == 'label_high_20':
#Size Code (New Index Series)
feats['Size Code (New Index Series)'] = list_data['Size Code (New Index Series)'].values[0]
# Annual net income growth rate
annual['Annual Net income increase rate'] = cls.get_Rate_of_increase(annual['Result_FinancialStatement NetIncome'])
# Handle missing values.
annual = annual.replace([np.nan], 0)
feats['Annual Net income increase rate'] = annual['Annual Net income increase rate']
# Operating income growth rate for Q1, Q2, Q3 and Annual
q1['Q1 Operating income increase rate'] = cls.get_Rate_of_increase(q1['Result_FinancialStatement OperatingIncome'])
q2['Q2 Operating income increase rate'] = cls.get_Rate_of_increase(q2['Result_FinancialStatement OperatingIncome'])
q3['Q3 Operating income increase rate'] = cls.get_Rate_of_increase(q3['Result_FinancialStatement OperatingIncome'])
annual['Annual Operating income increase rate'] = cls.get_Rate_of_increase(annual['Result_FinancialStatement OperatingIncome'])
# Handle missing values.
q1 = q1.replace([np.nan], 0)
q2 = q2.replace([np.nan], 0)
q3 = q3.replace([np.nan], 0)
annual = annual.replace([np.nan], 0)
feats['Q1 Operating income increase rate'] = q1['Q1 Operating income increase rate']
feats['Q2 Operating income increase rate'] = q2['Q2 Operating income increase rate']
feats['Q3 Operating income increase rate'] = q3['Q3 Operating income increase rate']
feats['Annual Operating income increase rate'] = annual['Annual Operating income increase rate']
# Net income growth rate for Q1, Q2, Q3 and Annual
q1['Q1 Net income increase rate'] = cls.get_Rate_of_increase(q1['Result_FinancialStatement NetIncome'])
q2['Q2 Net income increase rate'] = cls.get_Rate_of_increase(q2['Result_FinancialStatement NetIncome'])
q3['Q3 Net income increase rate'] = cls.get_Rate_of_increase(q3['Result_FinancialStatement NetIncome'])
annual['Annual Net income increase rate'] = cls.get_Rate_of_increase(annual['Result_FinancialStatement NetIncome'])
# Handle missing values.
q1 = q1.replace([np.nan], 0)
q2 = q2.replace([np.nan], 0)
q3 = q3.replace([np.nan], 0)
annual = annual.replace([np.nan], 0)
feats['Q1 Net income increase rate'] = q1['Q1 Net income increase rate']
feats['Q2 Net income increase rate'] = q2['Q2 Net income increase rate']
feats['Q3 Net income increase rate'] = q3['Q3 Net income increase rate']
feats['Annual Net income increase rate'] = annual['Annual Net income increase rate']
# PER (price-earnings ratio)
feats['Annual PER'] = price_data['EndOfDayQuote ExchangeOfficialClose'] / annual_EPS
# Growth rate of reported (settlement) operating income
feats['Settlement operating income increase rate'] = (settlement['Result_FinancialStatement OperatingIncome'] - settlement['Result_FinancialStatement OperatingIncome Shift']) / settlement['Result_FinancialStatement OperatingIncome Shift']
# Handle missing values.
feats = feats.replace([np.nan], -99999)
# Report type of next period's financial results
feats['Forecast_FinancialStatement ReportType'] = settlement['Forecast_FinancialStatement ReportType']
# Forecast sales growth rate for next period's financial results
feats['Expected settlement of accounts for the next fiscal year Sales increase rate'] = (settlement['Forecast_FinancialStatement NetSales'] - settlement['Forecast_FinancialStatement NetSales Shift']) / settlement['Forecast_FinancialStatement NetSales Shift']
# Sales growth rate
feats['Sales growth rate'] = cls.get_Rate_of_increase(feats['Result_FinancialStatement NetSales'])
# Operating income growth rate
feats['Operating income increase rate'] = cls.get_Rate_of_increase(feats['Result_FinancialStatement OperatingIncome'])
# Ordinary income growth rate
feats['Ordinary income increase rate'] = cls.get_Rate_of_increase(feats['Result_FinancialStatement OrdinaryIncome'])
# BPS (book value per share)
BPS = (feats['Result_FinancialStatement NetAssets'] * million) / feats['IssuedShareEquityQuote IssuedShare']
# PBR (price-to-book ratio)
feats['PBR'] = feats['EndOfDayQuote ExchangeOfficialClose'] / BPS
# CFPS (cash flow per share)
CFPS = (feats['Result_FinancialStatement CashFlowsFromOperatingActivities'] * million) / feats['IssuedShareEquityQuote IssuedShare']
# PCFR (price-to-cash-flow ratio)
feats['PCFR'] = feats['EndOfDayQuote ExchangeOfficialClose'] / CFPS
# Forecast dividend yield for the next period
feats['Forecast Dividend yield'] = feats['Forecast_Dividend AnnualDividendPerShare'] / feats['EndOfDayQuote ExchangeOfficialClose']
# Market capitalization
feats['Market capitalization'] = (feats['EndOfDayQuote ExchangeOfficialClose'] * million) * feats['IssuedShareEquityQuote IssuedShare']
# Cash flow margin
feats['Forecast Cash flow margin'] = feats['Result_FinancialStatement CashFlowsFromOperatingActivities'] / feats['Forecast_FinancialStatement NetSales']
# 5-day rolling mean of the high-low difference
feats['Stock price difference Mean 5'] = price_data['Stock price difference'].rolling(5).mean()
# Subtract the current day's price from the 5-day average
EndOfDayQuote_ExchangeOfficialClose_Mean_5 = price_data['EndOfDayQuote ExchangeOfficialClose'].rolling(5).mean()
feats['Subtract the current days stock price from the 5-day average'] = EndOfDayQuote_ExchangeOfficialClose_Mean_5 - feats['EndOfDayQuote ExchangeOfficialClose']
# Ratio of liabilities to sales
feats['Ratio of sales to liabilities'] = liabilities / feats['Result_FinancialStatement NetSales']
# Debt growth rate
feats['Debt growth rate'] = cls.get_Rate_of_increase(liabilities)
# 20-business-day volatility of the close
feats['20 business days volatility'] = (np.log(price_data['EndOfDayQuote ExchangeOfficialClose']).diff().rolling(20).std())
# 40-business-day volatility of the close
feats['40 business days volatility'] = (np.log(price_data['EndOfDayQuote ExchangeOfficialClose']).diff().rolling(40).std())
# 60-business-day volatility of the close
feats['60 business days volatility'] = (np.log(price_data['EndOfDayQuote ExchangeOfficialClose']).diff().rolling(60).std())
# 20-business-day return of the close
feats['20 business day return'] = price_data['EndOfDayQuote ExchangeOfficialClose'].pct_change(20)
# Drop intermediate columns
for nn in range(0, 21, 5):
nn_str = str(nn)
feats = feats.drop(['EndOfDayQuote High Return' + nn_str], axis=1)
feats = feats.drop(['EndOfDayQuote Low Return' + nn_str], axis=1)
feats = feats.drop(['EndOfDayQuote Open Return' + nn_str], axis=1)
feats = feats.drop(['EndOfDayQuote Close Return' + nn_str], axis=1)
elif label == 'label_low_20':
# Sales growth rate for Q1, Q2, Q3 and Annual
q1['Q1 Sales growth rate'] = cls.get_Rate_of_increase(q1['Result_FinancialStatement NetSales'])
q2['Q2 Sales growth rate'] = cls.get_Rate_of_increase(q2['Result_FinancialStatement NetSales'])
q3['Q3 Sales growth rate'] = cls.get_Rate_of_increase(q3['Result_FinancialStatement NetSales'])
annual['Annual Sales growth rate'] = cls.get_Rate_of_increase(annual['Result_FinancialStatement NetSales'])
# Handle missing values.
q1 = q1.replace([np.nan], 0)
q2 = q2.replace([np.nan], 0)
q3 = q3.replace([np.nan], 0)
annual = annual.replace([np.nan], 0)
feats['Q1 Sales growth rate'] = q1['Q1 Sales growth rate']
feats['Q2 Sales growth rate'] = q2['Q2 Sales growth rate']
feats['Q3 Sales growth rate'] = q3['Q3 Sales growth rate']
feats['Annual Sales growth rate'] = annual['Annual Sales growth rate']
# Annual growth rate of financing cash flow
annual['Annual Rate of increase in financial cash flow'] = cls.get_Rate_of_increase(annual['Result_FinancialStatement CashFlowsFromFinancingActivities'])
# Handle missing values.
annual = annual.replace([np.nan], 0)
feats['Annual Rate of increase in financial cash flow'] = annual['Annual Rate of increase in financial cash flow']
# Annual EPS (earnings per share)
feats['Annual EPS'] = annual_EPS
# Handle missing values.
feats = feats.replace([np.nan], -99999)
# Forecast operating income growth rate for next period's financial results
feats['Expected settlement of accounts for the next fiscal year Operating income increase rate'] = (settlement['Forecast_FinancialStatement OperatingIncome'] - settlement['Forecast_FinancialStatement OperatingIncome Shift']) / settlement['Forecast_FinancialStatement OperatingIncome Shift']
# Debt ratio
feats['Debt ratio'] = liabilities / feats['Result_FinancialStatement NetAssets']
# Profit margin
Profit_rate = feats['Result_FinancialStatement NetIncome'] / feats['Result_FinancialStatement NetSales']
# Profit margin growth rate
feats['Profit margin increase rate'] = cls.get_Rate_of_increase(Profit_rate)
# Equity ratio
feats['equity_ratio'] = feats['Result_FinancialStatement NetAssets'] / feats['Result_FinancialStatement TotalAssets']
# Net income growth rate
feats['Net income increase rate'] = cls.get_Rate_of_increase(feats['Result_FinancialStatement NetIncome'])
# EPS (earnings per share)
EPS = feats['Result_FinancialStatement NetIncome'] / feats['IssuedShareEquityQuote IssuedShare']
# PER (price-earnings ratio)
PER = price_data['EndOfDayQuote ExchangeOfficialClose'] / EPS
# Target stock price
feats['Target stock price'] = EPS * PER
# Drop columns
feats = feats.drop(['EndOfDayQuote RisingAndFallingPrices','Result_FinancialStatement TotalAssets',
'Result_FinancialStatement CashFlowsFromOperatingActivities',
'Forecast_Dividend QuarterlyDividendPerShare','Result_FinancialStatement CashFlowsFromFinancingActivities',
'Forecast_FinancialStatement FiscalYear','Result_Dividend FiscalYear',
'Forecast_FinancialStatement NetIncome', 'Forecast_FinancialStatement OperatingIncome',
'Forecast_FinancialStatement NetSales','Result_FinancialStatement OrdinaryIncome',], axis=1)
feats = feats.drop(['EndOfDayQuote ExchangeOfficialClose',], axis=1)
# Handle missing values.
feats = feats.replace([np.inf, -np.inf, np.nan], 0)
# Set the stock code
feats['code'] = code
# Keep only features on or after the generation start date
feats = feats.loc[pd.Timestamp(start_dt) :]
return feats
@classmethod
def create_model(cls, dfs, codes, label):
"""
Args:
dfs (dict) : dict of pd.DataFrame include stock_fin, stock_price
codes (list[int]): local codes for listed companies
label (str): prediction target label
Returns:
lgb.LGBMRegressor
"""
# Get the features
buff = []
print(label,' Get Feature value')
for code in tqdm(codes):
buff.append(cls.get_features_for_predict(cls.dfs,code,label))
feature = | pd.concat(buff) | pandas.concat |
"""Twitter view"""
__docformat__ = "numpy"
import logging
import os
from typing import Optional, List
from datetime import datetime, timedelta
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from dateutil import parser as dparse
import gamestonk_terminal.config_plot as cfg_plot
from gamestonk_terminal.config_terminal import theme
from gamestonk_terminal.common.behavioural_analysis import twitter_model
from gamestonk_terminal.decorators import log_start_end
from gamestonk_terminal.helper_funcs import export_data, plot_autoscale
from gamestonk_terminal.rich_config import console
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def display_inference(ticker: str, num: int, export: str = ""):
"""Infer sentiment from past n tweets
Parameters
----------
ticker: str
Stock ticker
num: int
Number of tweets to analyze
export: str
Format to export tweet dataframe
"""
df_tweets = twitter_model.load_analyze_tweets(ticker, num)
if df_tweets.empty:
return
# Parse tweets
dt_from = dparse.parse(df_tweets["created_at"].values[-1])
dt_to = dparse.parse(df_tweets["created_at"].values[0])
console.print(f"From: {dt_from.strftime('%Y-%m-%d %H:%M:%S')}")
console.print(f"To: {dt_to.strftime('%Y-%m-%d %H:%M:%S')}")
console.print(f"{len(df_tweets)} tweets were analyzed.")
dt_delta = dt_to - dt_from
n_freq = dt_delta.total_seconds() / len(df_tweets)
console.print(f"Frequency of approx 1 tweet every {round(n_freq)} seconds.")
pos = df_tweets["positive"]
neg = df_tweets["negative"]
percent_pos = len(np.where(pos > neg)[0]) / len(df_tweets)
percent_neg = len(np.where(pos < neg)[0]) / len(df_tweets)
total_sent = np.round(np.sum(df_tweets["sentiment"]), 2)
mean_sent = np.round(np.mean(df_tweets["sentiment"]), 2)
console.print(f"The summed compound sentiment of {ticker} is: {total_sent}")
console.print(f"The average compound sentiment of {ticker} is: {mean_sent}")
console.print(
f"Of the last {len(df_tweets)} tweets, {100*percent_pos:.2f} % had a higher positive sentiment"
)
console.print(
f"Of the last {len(df_tweets)} tweets, {100*percent_neg:.2f} % had a higher negative sentiment"
)
console.print("")
export_data(export, os.path.dirname(os.path.abspath(__file__)), "infer", df_tweets)
@log_start_end(log=logger)
def display_sentiment(
ticker: str,
n_tweets: int,
n_days_past: int,
export: str = "",
external_axes: Optional[List[plt.Axes]] = None,
):
"""Plot sentiments from ticker
Parameters
----------
ticker: str
Stock to get sentiment for
n_tweets: int
Number of tweets to get per hour
n_days_past: int
Number of days to extract tweets for
export: str
Format to export tweet dataframe
external_axes: Optional[List[plt.Axes]], optional
    External axes to plot on (a list of two axes is expected), by default None
"""
# Date format string required by twitter
dt_format = "%Y-%m-%dT%H:%M:%SZ"
# Algorithm to extract
dt_recent = datetime.utcnow() - timedelta(seconds=20)
dt_old = dt_recent - timedelta(days=n_days_past)
console.print(
f"From {dt_recent.date()} retrieving {n_tweets*24} tweets ({n_tweets} tweets/hour)"
)
df_tweets = pd.DataFrame(
columns=[
"created_at",
"text",
"sentiment",
"positive",
"negative",
"neutral",
]
)
while True:
# Iterate until we haven't passed the old number of days
if dt_recent < dt_old:
break
# Update past datetime
dt_past = dt_recent - timedelta(minutes=60)
temp = twitter_model.load_analyze_tweets(
ticker,
n_tweets,
start_time=dt_past.strftime(dt_format),
end_time=dt_recent.strftime(dt_format),
)
if temp.empty:
return
df_tweets = pd.concat([df_tweets, temp])
if dt_past.day < dt_recent.day:
console.print(
f"From {dt_past.date()} retrieving {n_tweets*24} tweets ({n_tweets} tweets/hour)"
)
# Update recent datetime
dt_recent = dt_past
# Sort tweets per date
df_tweets.sort_index(ascending=False, inplace=True)
df_tweets["cumulative_compound"] = df_tweets["sentiment"].cumsum()
df_tweets["prob_sen"] = 1
# df_tweets.to_csv(r'notebooks/tweets.csv', index=False)
df_tweets.reset_index(inplace=True)
df_tweets["Month"] = pd.to_datetime(df_tweets["created_at"]).apply(
lambda x: x.month
)
df_tweets["Day"] = pd.to_datetime(df_tweets["created_at"]).apply(lambda x: x.day)
df_tweets["date"] = pd.to_datetime(df_tweets["created_at"])
df_tweets = df_tweets.sort_values(by="date")
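# Recompute the cumulative sentiment after the chronological sort so it
# accumulates in time order.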
df_tweets["cumulative_compound"] = df_tweets["sentiment"].cumsum()
# This plot has 2 axes
if external_axes is None:
_, axes = plt.subplots(
2, 1, sharex=True, figsize=plot_autoscale(), dpi=cfg_plot.PLOT_DPI
)
ax1, ax2 = axes
else:
if len(external_axes) != 2:
console.print("[red]Expected list of one axis item./n[/red]")
return
(ax1, ax2) = external_axes
ax1.plot(
pd.to_datetime(df_tweets["created_at"]),
df_tweets["cumulative_compound"].values,
)
ax1.set_ylabel("\nCumulative\nVADER Sentiment")
for _, day_df in df_tweets.groupby(by="Day"):
day_df["time"] = | pd.to_datetime(day_df["created_at"]) | pandas.to_datetime |
import os
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from skimage.measure import label, regionprops
from tqdm import tqdm
from .nii_dataset import NiiDataset
__all__ = ["froc", "plot_froc", "evaluate"]
# detection key FP values
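# (FROC sensitivity is typically summarized at these average false-positive-per-scan
# operating points.)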
DEFAULT_KEY_FP = (0.5, 1, 2, 4, 8)
# classification confusion matrix settings
label_code_dict = {
0: "Background",
1: "Displaced",
2: "Nondisplaced",
3: "Buckle",
4: "Segmental",
-1: "Ignore"
}
# for ground truth; FP: false positive detections;
# Ignore: undefined labels in annotations
clf_conf_mat_cols = ["Buckle", "Displaced", "Nondisplaced", "Segmental",
"FP", "Ignore"]
# for prediction; FN: false negative, hit no gt labels
clf_conf_mat_rows = ["Buckle", "Displaced", "Nondisplaced", "Segmental", "FN"]
pd.set_option("display.precision", 6)
def _get_gt_class(x):
# if GT classification exists, use it
if not | pd.isna(x["gt_class"]) | pandas.isna |
import logging
from collections import defaultdict
from concurrent.futures import FIRST_EXCEPTION, wait
from itertools import product
from pathlib import Path
from typing import Iterable, List, Optional, Tuple, Union
import numpy as np
import pandas as pd
from hermes.typeo import typeo
from rich.progress import Progress
from bbhnet.analysis.analysis import integrate
from bbhnet.analysis.distributions import DiscreteDistribution
from bbhnet.analysis.normalizers import GaussianNormalizer
from bbhnet.io.h5 import write_timeseries
from bbhnet.io.timeslides import Segment, TimeSlide
from bbhnet.logging import configure_logging
from bbhnet.parallelize import AsyncExecutor, as_completed
event_times = [1186302519.8, 1186741861.5, 1187058327.1, 1187529256.5]
event_names = ["GW170809", "GW170814", "GW170818", "GW170823"]
events = {name: time for name, time in zip(event_names, event_times)}
def load_segment(segment: Segment):
"""
Quick utility function which just wraps a Segment's
`load` method so that we can execute it in a process
pool since methods aren't picklable.
"""
segment.load("out")
return segment
def get_write_dir(
write_dir: Path, norm: Optional[float], shift: Union[str, Segment]
) -> Path:
"""
Quick utility function for getting the name of the directory
to which to save the outputs from an analysis using a particular
time-shift/norm-seconds combination
"""
if isinstance(shift, Segment):
shift = shift.shift
write_dir = write_dir / f"norm-seconds.{norm}" / shift
write_dir.mkdir(parents=True, exist_ok=True)
return write_dir
def build_background(
thread_ex: AsyncExecutor,
process_ex: AsyncExecutor,
pbar: Progress,
background_segments: Iterable[Segment],
data_dir: Path,
write_dir: Path,
max_tb: float,
window_length: float = 1.0,
norm_seconds: Optional[Iterable[float]] = None,
num_bins: int = int(1e4),
):
"""
For a sequence of background segments, compute a discrete
distribution of integrated neural network outputs using
the indicated integration window length for each of the
normalization window lengths specified. Iterates through
the background segments in order and tries to find as
many time-shifts available for each segment as possible
in the specified data directory, stopping iteration through
segments once a maximum number of seconds of background have
been generated.
As a warning, there's a fair amount of asynchronous execution
going on in this function, and it may come off a bit complex.
Args:
thread_ex:
An `AsyncExecutor` that maintains a thread pool
for writing analyzed segments in parallel with
the analysis processes themselves.
process_ex:
An `AsyncExecutor` that maintains a process pool
for loading and integrating Segments of neural
network outputs.
pbar:
A `rich.progress.Progress` object for keeping
track of the progress of each of the various
subtasks.
background_segments:
The `Segment` objects to use for building a
background distribution. `data_dir` will be
searched for all time-shifts of each segment
for parallel analysis. Once `max_tb` seconds
worth of background have been generated, iteration
through this array will be terminated, so segments
should be ordered by some level of "importance",
since it's likely that segments near the back of the
array won't be analyzed for lower values of `max_tb`.
data_dir:
Directory containing timeslide root directories,
which will be mined for time-shifts of each `Segment`
in `background_segments`. If a time-shift doesn't exist
for a given `Segment`, the time-shift is ignored.
write_dir:
Root directory to which to write integrated NN outputs.
For each time-shift analyzed and normalization window
length specified in `norm_seconds`, results will be
written to a subdirectory
`write_dir / "norm-seconds.{norm}" / shift`, which
will be created if it does not exist.
max_tb:
The maximum number of seconds of background data
to analyze for each value of `norm_seconds` before
new segments to shift and analyze are no longer sought.
However, because we use _every_ time-shift for each
segment we iterate through, it's possible that each
background distribution will utilize slightly more
than this value.
window_length:
The length of the integration window to use
for analysis in seconds.
norm_seconds:
An array of normalization window lengths to use
to standardize the integrated neural network outputs.
(i.e. the output timeseries is the integral over the
previous `window_length` seconds, normalized by the
mean and standard deviation of the previous `norm`
seconds before that, where `norm` is each value in
`norm_seconds`). A `norm` value of `None` in the
`norm_seconds` iterable indicates
no normalization, and if `norm_seconds` is left as
`None` this will be the only value used.
num_bins:
The number of bins to use to initialize the discrete
distribution used to characterize the background
distribution.
Returns:
A dictionary mapping each value in `norm_seconds` to
an associated `DiscreteDistribution` characterizing
its background distribution.
"""
write_dir.mkdir(exist_ok=True)
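# If no normalization windows were supplied, run a single un-normalized pass ([None]).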
norm_seconds = norm_seconds or [norm_seconds]
# keep track of the min and max values of each normalization
# window's background and the corresponding filenames so
# that we can fit a discrete distribution to it after the fact
mins = defaultdict(lambda: float("inf"))
maxs = defaultdict(lambda: -float("inf"))
# keep track of all the files that we've written
# for each normalization window size so that we
# can iterate through them later and submit them
# for reloading once we have our distributions initialized
fname_futures = defaultdict(list)
# iterate through timeshifts of our background segments
# until we've generated enough background data.
background_segments = iter(background_segments)
main_task_id = pbar.add_task("[red]Building background", total=max_tb)
while not pbar.tasks[main_task_id].finished:
segment = next(background_segments)
# since we're assuming here that the background
# segments are being provided in reverse chronological
# order (with segments closest to the event segment first),
# exhaust all the time shifts we can of each segment before
# going to the previous one to keep data as fresh as possible
load_futures = {}
for shift in data_dir.iterdir():
try:
shifted = segment.make_shift(shift.name)
except ValueError:
# this segment doesn't have a shift
# at this value, so just move on
continue
# load all the timeslides up front in a separate thread
# TODO: O(1GB) memory means segment.length * N ~O(4M),
# so for ~O(10k) long segments this means this should
# be fine as long as N ~ O(100). Worth doing a check for?
future = process_ex.submit(load_segment, shifted)
load_futures[shift.name] = [future]
# create progress bar tasks for each one
# of the subprocesses involved for analyzing
# this set of timeslides
load_task_id = pbar.add_task(
f"[cyan]Loading {len(load_futures)} {segment.length}s timeslides",
total=len(load_futures),
)
analyze_task_id = pbar.add_task(
"[yelllow]Integrating timeslides",
total=len(load_futures) * len(norm_seconds),
)
write_task_id = pbar.add_task(
"[green]Writing integrated timeslides",
total=len(load_futures) * len(norm_seconds),
)
# now once each segment is loaded, submit a job
# to our process pool to integrate it using each
# one of the specified normalization periods
integration_futures = {}
sample_rate = None
for shift, seg in as_completed(load_futures):
# get the sample rate of the NN output timeseries
# dynamically from the first timeseries we load,
# since we'll need it to initialize our normalizers
if sample_rate is None:
t = seg._cache["t"]
sample_rate = 1 / (t[1] - t[0])
for norm in norm_seconds:
# build a normalizer for the given normalization window length
if norm is not None:
normalizer = GaussianNormalizer(norm * sample_rate)
else:
normalizer = None
# submit the integration job and have it update the
# corresponding progress bar task once it completes
future = process_ex.submit(
integrate,
seg,
kernel_length=1.0,
window_length=window_length,
normalizer=normalizer,
)
future.add_done_callback(
lambda f: pbar.update(analyze_task_id, advance=1)
)
integration_futures[(norm, shift)] = [future]
# advance the task keeping track of how many files
# we've loaded by one
pbar.update(load_task_id, advance=1)
# make sure we have the expected number of jobs submitted
if len(integration_futures) < (len(norm_seconds) * len(load_futures)):
raise ValueError(
"Expected {} integration jobs submitted, "
"but only found {}".format(
len(norm_seconds) * len(load_futures),
len(integration_futures),
)
)
# as the integration jobs come back, write their
# results using our thread pool and record the
# min and max values for our discrete distribution
segment_futures = []
for (norm, shift), (t, y, integrated) in as_completed(
integration_futures
):
# submit the writing job to our thread pool and
# use a callback to keep track of all the filenames
# for a given normalization window
shift_dir = get_write_dir(write_dir, norm, shift)
future = thread_ex.submit(
write_timeseries,
shift_dir,
t=t,
y=y,
integrated=integrated,
)
future.add_done_callback(
lambda f: pbar.update(write_task_id, advance=1)
)
fname_futures[norm].append(future)
segment_futures.append(future)
# keep track of the max and min values for each norm
mins[norm] = min(mins[norm], integrated.min())
maxs[norm] = max(maxs[norm], integrated.max())
# wait for all the writing to finish before we
# move on so that we don't overload our processes
wait(segment_futures, return_when=FIRST_EXCEPTION)
pbar.update(main_task_id, advance=len(load_futures) * segment.length)
# now that we've analyzed enough background data,
# we'll initialize background distributions using
# the min and max bounds we found during analysis
# and then load everything back in to bin them
# within these bounds
Tb = pbar.tasks[main_task_id].completed
logging.info(f"Accumulated {Tb}s of background matched filter outputs.")
# submit a bunch of jobs for loading these integrated
# segments back in for discretization
load_futures = defaultdict(list)
for norm, fname in as_completed(fname_futures):
future = process_ex.submit(load_segment, Segment(fname))
load_futures[norm].append(future)
# create a task for each one of the normalization windows
# tracking how far along the distribution fit is
fit_task_ids = {}
for norm in norm_seconds:
norm_name = f"{norm}s" if norm is not None else "empty"
task_id = pbar.add_task(
"[purple]Fitting background using {} normalization window".format(
norm_name
),
total=len(load_futures[norm]),
)
fit_task_ids[norm] = task_id
# now discretize the analyzed segments as they're loaded back in
backgrounds = {}
for norm, segment in as_completed(load_futures):
try:
# if we already have a background distribution
# for this event, grab it and fit it with a
# "warm start" aka don't ditch the existing histogram
background = backgrounds[norm]
warm_start = True
except KeyError:
# otherwise create a new distribution
# and fit it from scratch
mn, mx = mins[norm], maxs[norm]
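# The histogram bins span the min/max integrated values recorded during the
# analysis pass above.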
background = DiscreteDistribution("integrated", mn, mx, num_bins)
backgrounds[norm] = background
warm_start = False
# fit the distribution to the new data and then
# update the corresponding task tracker
background.fit(segment, warm_start=warm_start)
pbar.update(fit_task_ids[norm], advance=1)
return backgrounds
def check_if_needs_analyzing(
event_segment: Segment,
norm_seconds: Iterable[Optional[float]],
characterizations: pd.DataFrame,
) -> Iterable[Optional[float]]:
times = [t for t in event_times if t in event_segment]
names = [name for name in event_names if events[name] in times]
combos = set(product(names, norm_seconds))
remaining = combos - set(characterizations.index)
# only do analysis on those normalization
# values that we haven't already done
# (sorry, you'll still have to do it for all events,
# but those are minuscule by comparison)
norm_seconds = list(set([j for i, j in remaining]))
return norm_seconds, names, times
def analyze_event(
thread_ex: AsyncExecutor,
process_ex: AsyncExecutor,
characterizations: pd.DataFrame,
timeseries: pd.DataFrame,
event_segment: Segment,
background_segments: Iterable[Segment],
data_dir: Path,
write_dir: Path,
results_dir: Path,
max_tb: float,
window_length: float = 1.0,
norm_seconds: Optional[Iterable[float]] = None,
num_bins: int = int(1e4),
force: bool = False,
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""
Use timeshifts of a set of previous segments to build a
background distribution with which to analyze a segment
containing an event and characterize the false alarm
rate of that event as a function of time from the event
trigger.
"""
# first check if we can skip this analysis altogether
# because we already have data on it and we're not
# forcing ourselves to re-analyze
norm_seconds = norm_seconds or [norm_seconds]
if not force:
norm_seconds, names, times = check_if_needs_analyzing(
event_segment, norm_seconds, characterizations
)
if len(norm_seconds) == 0:
logging.info(
f"Already analyzed events in segment {event_segment}, skipping"
)
return
with Progress() as pbar:
# TODO: exclude segments with events?
backgrounds = build_background(
thread_ex,
process_ex,
pbar,
background_segments=background_segments,
data_dir=data_dir,
write_dir=write_dir,
window_length=window_length,
norm_seconds=norm_seconds,
max_tb=max_tb,
num_bins=num_bins,
)
# now use the fit background to characterize the
# significance of BBHNet's detection around the event
for norm, background in backgrounds.items():
if norm is not None:
normalizer = GaussianNormalizer(norm)
else:
normalizer = None
logging.info(
"Characterizing events {} with normalization "
"window length {}".format(", ".join(names), norm)
)
t, y, integrated = integrate(
event_segment,
kernel_length=1,
window_length=window_length,
normalizer=normalizer,
)
fname = write_timeseries(
get_write_dir(write_dir, norm, event_segment),
t=t,
y=y,
integrated=integrated,
)
# create a segment and add the existing data to
# its cache so that we don't try to load it again
segment = Segment(fname)
segment._cache = {"t": t, "integrated": integrated}
fars, latencies = background.characterize_events(
segment, times, window_length=window_length, metric="far"
)
# for each one of the events in this segment,
# record the false alarm rate as a function of
# time and add it to our dataframe then checkpoint it.
# Then isolate the timeseries of both the NN outputs and
# the integrated values around the event and write those
# to another dataframe and checkpoint that as well
for far, latency, name, time in zip(fars, latencies, names, times):
logging.info(f"\t{name}:")
logging.info(f"\t\tFalse Alarm Rates: {list(far)}")
logging.info(f"\t\tLatencies: {list(latency)}")
df = pd.DataFrame(
dict(
event_name=[name] * len(far),
norm_seconds=[norm] * len(far),
far=far,
latency=latency,
)
).set_index(["event_name", "norm_seconds"])
characterizations = | pd.concat([characterizations, df]) | pandas.concat |
import time
import numpy as np
import pandas as pd
from scipy.sparse import csr_matrix
from tqdm import tqdm
from course_lib.Base.BaseRecommender import BaseRecommender
from src.data_management.data_preprocessing_fm import sample_negative_interactions_uniformly
from src.utils.general_utility_functions import get_total_number_of_users, get_total_number_of_items
from sklearn.preprocessing import MinMaxScaler
def preprocess_dataframe_after_reading(df: pd.DataFrame):
df = df.copy()
df = df.sort_values(by="user_id", ascending=True)
df = df.reset_index()
df = df.drop(columns=["index"], inplace=False)
return df
def get_valid_dataframe_second_version(user_id_array, cutoff, main_recommender, path, mapper, recommender_list,
URM_train, user_factors=None, item_factors=None):
data_frame = get_boosting_base_dataframe(user_id_array=user_id_array, top_recommender=main_recommender,
exclude_seen=True, cutoff=cutoff)
for rec in recommender_list:
data_frame = add_recommender_predictions(data_frame=data_frame, recommender=rec,
column_name=rec.RECOMMENDER_NAME)
data_frame = advanced_subclass_handling(data_frame=data_frame, URM_train=URM_train, path=path)
data_frame = add_ICM_information(data_frame=data_frame, path=path, one_hot_encoding_subclass=False,
use_subclass=True)
data_frame = add_UCM_information(data_frame=data_frame, path=path, user_mapper=mapper)
data_frame = add_user_len_information(data_frame=data_frame, URM_train=URM_train)
data_frame = add_item_popularity(data_frame=data_frame, URM_train=URM_train)
if user_factors is not None:
data_frame = add_user_factors(data_frame=data_frame, user_factors=user_factors)
if item_factors is not None:
data_frame = add_item_factors(data_frame=data_frame, item_factors=item_factors)
data_frame = data_frame.sort_values(by="user_id", ascending=True)
data_frame = data_frame.reset_index()
data_frame = data_frame.drop(columns=["index"], inplace=False)
return data_frame
def get_train_dataframe_proportion(user_id_array, cutoff, main_recommender, path, mapper, recommender_list,
URM_train, proportion, user_factors=None, item_factors=None,
negative_label_value=0, threshold=0.7):
data_frame = get_boosting_base_dataframe(user_id_array=user_id_array, top_recommender=main_recommender,
exclude_seen=False, cutoff=cutoff)
labels, non_zero_count, _ = get_label_array(data_frame, URM_train)
data_frame['label'] = labels
data_frame = add_random_negative_ratings(data_frame=data_frame, URM_train=URM_train, proportion=proportion,
negative_label_value=negative_label_value)
data_frame = data_frame.sort_values(by="user_id", ascending=True)
data_frame = data_frame.reset_index()
data_frame = data_frame.drop(columns=["index"], inplace=False)
for rec in recommender_list:
data_frame = add_recommender_predictions(data_frame=data_frame, recommender=rec,
column_name=rec.RECOMMENDER_NAME)
# Bump the label value to further differentiate these items
mask = (data_frame[rec.RECOMMENDER_NAME] > threshold) & (data_frame['label'] > 0)
print("\t Score greater than threshold: {}/{}".format(np.sum(mask), non_zero_count))
data_frame.loc[mask, 'label'] += 1
print("Labels greater than 1: {}".format(np.sum(data_frame['label'] > 1)))
data_frame = advanced_subclass_handling(data_frame=data_frame, URM_train=URM_train, path=path, add_subclass=False)
data_frame = add_ICM_information(data_frame=data_frame, path=path, one_hot_encoding_subclass=False,
use_subclass=True)
data_frame = add_UCM_information(data_frame=data_frame, path=path, user_mapper=mapper)
data_frame = add_user_len_information(data_frame=data_frame, URM_train=URM_train)
data_frame = add_item_popularity(data_frame=data_frame, URM_train=URM_train)
if user_factors is not None:
data_frame = add_user_factors(data_frame=data_frame, user_factors=user_factors)
if item_factors is not None:
data_frame = add_item_factors(data_frame=data_frame, item_factors=item_factors)
data_frame = data_frame.sort_values(by="user_id", ascending=True)
data_frame = data_frame.reset_index()
data_frame = data_frame.drop(columns=["index"], inplace=False)
return data_frame
def get_dataframe_all_data(user_id_array, path, mapper, recommender_list,
URM_train, proportion, user_factors=None, item_factors=None):
negative_URM = sample_negative_interactions_uniformly(negative_sample_size=len(URM_train.data) * proportion,
URM=URM_train)
data_frame = get_dataframe_URM(user_id_array=user_id_array, URM_train=URM_train + negative_URM)
labels, _, _ = get_label_array(data_frame, URM_train)
data_frame['label'] = labels
data_frame = data_frame.sort_values(by="user_id", ascending=True)
data_frame = data_frame.reset_index()
data_frame = data_frame.drop(columns=["index"], inplace=False)
for rec in recommender_list:
data_frame = add_recommender_predictions(data_frame=data_frame, recommender=rec,
column_name=rec.RECOMMENDER_NAME)
data_frame = advanced_subclass_handling(data_frame=data_frame, URM_train=URM_train, path=path, add_subclass=False)
data_frame = add_ICM_information(data_frame=data_frame, path=path, one_hot_encoding_subclass=False,
use_subclass=True)
data_frame = add_UCM_information(data_frame=data_frame, path=path, user_mapper=mapper)
data_frame = add_user_len_information(data_frame=data_frame, URM_train=URM_train)
data_frame = add_item_popularity(data_frame=data_frame, URM_train=URM_train)
if user_factors is not None:
data_frame = add_user_factors(data_frame=data_frame, user_factors=user_factors)
if item_factors is not None:
data_frame = add_item_factors(data_frame=data_frame, item_factors=item_factors)
data_frame = data_frame.sort_values(by="user_id", ascending=True)
data_frame = data_frame.reset_index()
data_frame = data_frame.drop(columns=["index"], inplace=False)
return data_frame
def get_dataframe_first_version(user_id_array, remove_seen_flag, cutoff, main_recommender, path, mapper,
recommender_list,
URM_train):
# Get dataframe for these users
data_frame = get_boosting_base_dataframe(user_id_array=user_id_array, exclude_seen=remove_seen_flag,
cutoff=cutoff, top_recommender=main_recommender)
for rec in recommender_list:
data_frame = add_recommender_predictions(data_frame=data_frame, recommender=rec,
column_name=rec.RECOMMENDER_NAME)
data_frame = add_ICM_information(data_frame=data_frame, path=path)
data_frame = add_UCM_information(data_frame=data_frame, path=path, user_mapper=mapper)
data_frame = add_user_len_information(data_frame=data_frame, URM_train=URM_train)
data_frame = data_frame.sort_values(by="user_id", ascending=True)
data_frame = data_frame.reset_index()
data_frame.drop(columns=["index"], inplace=False)
return data_frame
def add_user_factors(data_frame: pd.DataFrame, user_factors: np.ndarray):
"""
Add user factors to the dataframe
:param data_frame:
:param user_factors:
:return:
"""
print("Adding user factors...")
data_frame = data_frame.copy()
user_factors_df = pd.DataFrame(data=user_factors,
index=np.arange(0, user_factors.shape[0]),
columns=["user_factor_{}".format(i + 1) for i in range(user_factors.shape[1])])
data_frame = pd.merge(data_frame, user_factors_df, left_on="user_id", right_index=True)
return data_frame
def add_item_factors(data_frame: pd.DataFrame, item_factors: np.ndarray):
"""
Add item factors to the dataframe
:param data_frame:
:param item_factors:
:return:
"""
print("Adding item factors...")
data_frame = data_frame.copy()
item_factors_df = pd.DataFrame(data=item_factors,
index=np.arange(0, item_factors.shape[0]),
columns=["item_factor_{}".format(i + 1) for i in range(item_factors.shape[1])])
data_frame = pd.merge(data_frame, item_factors_df, left_on="item_id", right_index=True)
return data_frame
def add_item_popularity(data_frame: pd.DataFrame, URM_train: csr_matrix):
"""
Add the item popularity to the dataframe
:param data_frame: data frame containing information for boosting
:param URM_train: URM train matrix
:return: dataframe containing boosting information + item popularity
"""
print("Adding item popularity...")
data_frame = data_frame.copy()
pop_items = (URM_train > 0).sum(axis=0)
pop_items = np.array(pop_items).squeeze()
item_ids = np.arange(URM_train.shape[1])
data = np.array([item_ids, pop_items])
data = np.transpose(data)
new_df = pd.DataFrame(data=data, columns=["row", "item_pop"])
data_frame = pd.merge(data_frame, new_df, left_on="item_id", right_on="row")
data_frame = data_frame.drop(columns=["row"], inplace=False)
return data_frame
def get_label_array(data_frame: pd.DataFrame, URM_train: csr_matrix):
"""
Create a dataframe with a single column with the correct predictions
:param data_frame: data frame containing information for boosting
:param URM_train: URM train matrix
:return: numpy array containing y information
"""
print("Retrieving training labels...")
user_ids = data_frame['user_id'].values
item_ids = data_frame['item_id'].values
y = np.zeros(user_ids.size, dtype=np.int)
labels = np.array(URM_train[user_ids, item_ids].tolist()).flatten()
y[labels > 0] = 1
non_zero_count = np.count_nonzero(y)
print("\t- There are {} non-zero ratings in {}".format(non_zero_count, y.size))
return y, non_zero_count, y.size
def add_user_len_information(data_frame: pd.DataFrame, URM_train: csr_matrix):
"""
Add information concerning the user profile length to the row of the dataframe
:param data_frame: data frame that is being pre-processed from boosting
:param URM_train: URM train from which to take profile length information
:return: data frame with new content inserted
"""
print("Adding user profile length...")
data_frame = data_frame.copy()
user_act = (URM_train > 0).sum(axis=1)
user_act = np.array(user_act).squeeze()
user_ids = np.arange(URM_train.shape[0])
data = np.array([user_ids, user_act])
data = np.transpose(data)
new_df = pd.DataFrame(data=data, columns=["row", "user_act"])
data_frame = pd.merge(data_frame, new_df, left_on="user_id", right_on="row")
data_frame = data_frame.drop(columns=["row"], inplace=False)
return data_frame
def remap_data_frame(df: pd.DataFrame, mapper):
"""
Change user_id columns of the df given in input, according to the mapper.
Users that are not present will be removed, and the others will be mapped to the correct number.
:param df: dataframe that will be modified
:param mapper: mapper according to which the dataframe will be modified
:return: dataframe with "user_id" column modified properly
"""
df = df.copy()
# Remove users that are not present in the mapper
original_users = df['row'].values
new_users_key = list(mapper.keys())
new_users_key = list(map(int, new_users_key))
new_users_key = np.array(new_users_key)
mask = np.in1d(original_users, new_users_key, invert=True)
remove = original_users[mask]
df = df.set_index("row")
mask = np.in1d(df.index, remove)
df = df.drop(df.index[mask])
# Map the index to the new one
df = df.reset_index()
df['row'] = df['row'].map(lambda x: mapper[str(x)])
return df
def add_UCM_information(data_frame: pd.DataFrame, user_mapper, path="../../data/", use_region=True, use_age=True,
use_age_onehot=False):
"""
Add UCM information to the data frame for XGboost
:param data_frame: data frame containing information being pre-processed for boosting
:param user_mapper: mapper original users to train users
:param path: where to read UCM csv files
:param use_region: True is region information should be used, false otherwise
:param use_age: True if age information should be used, false otherwise
:param use_age_onehot: True if age information added is one hot, false otherwise
:return: pd.DataFrame containing the original data frame+ UCM information
"""
print("Adding UCM information...")
    t_users = get_total_number_of_users()  # Total number of users (ids range from 0 to t_users - 1)
data_frame = data_frame.copy()
df_region: pd.DataFrame = pd.read_csv(path + "data_UCM_region.csv")
df_age: pd.DataFrame = pd.read_csv(path + "data_UCM_age.csv")
# Re-map UCM data frame in order to have the correct user information
if use_region:
df_region = df_region[['row', 'col']]
df_dummies = pd.get_dummies(df_region['col'], prefix='region')
df_dummies = df_dummies.join(df_region['row'])
df_dummies = df_dummies.groupby(['row'], as_index=False).sum()
# Fill missing values
user_present = df_dummies['row'].values
total_users = np.arange(t_users)
mask = np.in1d(total_users, user_present, invert=True)
missing_users = total_users[mask]
num_col = df_dummies.columns.size
imputed_users = np.zeros(shape=(num_col, missing_users.size))
imputed_users[0] = missing_users
missing_df = pd.DataFrame(data=np.transpose(imputed_users), dtype=np.int32, columns=df_dummies.columns)
df_region_onehot = df_dummies.append(missing_df, sort=False)
if user_mapper is not None:
df_region_onehot = remap_data_frame(df=df_region_onehot, mapper=user_mapper)
data_frame = pd.merge(data_frame, df_region_onehot, right_on="row", left_on="user_id")
data_frame = data_frame.drop(columns=["row"], inplace=False)
if use_age:
df_age = df_age[['row', 'col']]
# Handle missing values: fill with mode + 1
users_present = df_age['row'].values
total_users = np.arange(t_users)
mask = np.in1d(total_users, users_present, invert=True)
missing_users = total_users[mask].astype(np.int32)
missing_val_filled = np.ones(missing_users.size) * (int(df_age['col'].mode()) + 1)
missing = np.array([missing_users, missing_val_filled], dtype=np.int32)
missing_df = pd.DataFrame(data=np.transpose(missing), columns=["row", "col"])
df_age_imputed = df_age.copy().append(missing_df, sort=False)
df_age_imputed = df_age_imputed.reset_index()
df_age_imputed = df_age_imputed[['row', 'col']]
if user_mapper is not None:
df_age_imputed = remap_data_frame(df=df_age_imputed, mapper=user_mapper)
df_age_imputed = df_age_imputed.rename(columns={"col": "age"})
if use_age_onehot:
row = df_age_imputed['row']
df_age_imputed = pd.get_dummies(df_age_imputed['age'], prefix='age')
df_age_imputed = df_age_imputed.join(row)
data_frame = pd.merge(data_frame, df_age_imputed, right_on="row", left_on="user_id")
data_frame = data_frame.drop(columns=["row"], inplace=False)
        # Add dummy variables indicating that the age has been imputed
df_age_dummy_imputation = df_age.copy()
df_age_dummy_imputation['col'] = 0
imputed_df = pd.DataFrame(
data={"row": missing_users, "col": np.ones(shape=missing_users.size, dtype=np.int)})
df_age_dummy_imputation = df_age_dummy_imputation.append(imputed_df, sort=False)
df_age_dummy_imputation = df_age_dummy_imputation.rename(columns={"col": "age_imputed_flag"})
if user_mapper is not None:
df_age_dummy_imputation = remap_data_frame(df=df_age_dummy_imputation, mapper=user_mapper)
data_frame = pd.merge(data_frame, df_age_dummy_imputation, right_on="row", left_on="user_id")
data_frame = data_frame.drop(columns=["row"], inplace=False)
return data_frame
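# Sketch of the region handling above on toy data (illustrative only, no CSV files needed,
# and it mirrors the original code's DataFrame.append usage): regions are one-hot encoded
# per user, and users missing from the UCM get an all-zero region row.
def _demo_region_onehot():
    import numpy as np
    import pandas as pd

    df_region = pd.DataFrame({"row": [0, 0, 2], "col": [5, 7, 5]})  # user 1 has no region
    df_dummies = pd.get_dummies(df_region["col"], prefix="region").join(df_region["row"])
    df_dummies = df_dummies.groupby(["row"], as_index=False).sum()

    total_users = np.arange(3)
    missing_users = total_users[np.in1d(total_users, df_dummies["row"].values, invert=True)]
    imputed = np.zeros((df_dummies.columns.size, missing_users.size))
    imputed[0] = missing_users
    missing_df = pd.DataFrame(np.transpose(imputed), dtype=np.int32, columns=df_dummies.columns)
    return df_dummies.append(missing_df, sort=False)  # user 1 appended with zero region columns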
def advanced_subclass_handling(data_frame: pd.DataFrame, URM_train: csr_matrix, path="../../data/",
add_subclass=False):
"""
    Here we want to include subclass information in the training set in the following way:
    - A column encoding the mean of 'label' for a certain couple (user, subclass): i.e. how many
      items of that subclass the user liked
    - Information about the popularity of the subclass (how many items belong to that subclass)
    - Information about the ratings collected by that subclass
:param URM_train: mean response will be retrieved from here
:param data_frame: dataframe being pre-processed for boosting
:param path: path to the folder containing subclass dataframe
:return: dataframe with augmented information
"""
print("Adding subclass and feature engineering subclass...")
data_frame = data_frame.copy()
df_subclass: pd.DataFrame = pd.read_csv(path + "data_ICM_sub_class.csv")
df_subclass = df_subclass[['row', 'col']]
df_subclass = df_subclass.rename(columns={"col": "subclass"})
# Merging sub class information
data_frame = pd.merge(data_frame, df_subclass, right_on="row", left_on="item_id")
data_frame = data_frame.drop(columns=["row"], inplace=False)
print("\t- Add items present for each subclass")
# Add subclass item-popularity: how many items are present of that subclass
subclass_item_count = df_subclass.groupby("subclass").count()
data_frame = pd.merge(data_frame, subclass_item_count, right_index=True, left_on="subclass")
data_frame = data_frame.rename(columns={"row": "item_per_subclass"})
print("\t- Add ratings popularity for each subclass")
# Add subclass ratings-popularity: how many interactions we have for each subclass
URM_train_csc = URM_train.tocsc()
n_ratings_sub = []
sorted_sub_indices = np.argsort(df_subclass['subclass'].values)
sorted_sub = df_subclass['subclass'][sorted_sub_indices].values
sorted_item_subclass = df_subclass['row'][sorted_sub_indices].values
unique_sorted_sub, sub_indptr = np.unique(sorted_sub, return_index=True)
sub_indptr = np.concatenate([sub_indptr, [sorted_sub.size]])
for i, sub in tqdm(enumerate(unique_sorted_sub), total=unique_sorted_sub.size, desc="\t\tProcessing"):
item_sub = sorted_item_subclass[sub_indptr[i]: sub_indptr[i + 1]]
n_ratings_sub.append(URM_train_csc[:, item_sub].data.size)
ratings_sub = np.array([unique_sorted_sub, n_ratings_sub])
ratings_per_sub_df = pd.DataFrame(data=np.transpose(ratings_sub),
columns=["subclass", "global_ratings_per_subclass"])
data_frame = pd.merge(data_frame, ratings_per_sub_df, left_on="subclass", right_on="subclass")
# Add subclass ratings-popularity for each user using rating percentage
print("\t- Add ratings popularity for pairs (user, subclass)")
users = data_frame['user_id'].values
sub = data_frame['subclass'].values
perc_array = np.zeros(users.size)
rat_array = np.zeros(users.size)
for i, user in tqdm(enumerate(users), total=users.size, desc="\t\tProcessing"):
curr_sub = sub[i]
curr_sub_index = np.searchsorted(unique_sorted_sub, curr_sub)
# Find items of this subclass
item_sub = sorted_item_subclass[sub_indptr[curr_sub_index]: sub_indptr[curr_sub_index + 1]]
user_item = URM_train.indices[URM_train.indptr[user]: URM_train.indptr[user + 1]]
total_user_likes = user_item.size
mask = np.in1d(item_sub, user_item)
likes_per_sub = item_sub[mask].size
user_p = likes_per_sub / total_user_likes
perc_array[i] = user_p
rat_array[i] = likes_per_sub
data_frame["subclass_user_like_perc"] = perc_array
data_frame["subclass_user_like_quantity"] = rat_array
if not add_subclass:
data_frame = data_frame.drop(columns=["subclass"], inplace=False)
return data_frame
def add_ICM_information(data_frame: pd.DataFrame, path="../../data/", use_price=True, use_asset=True,
use_subclass=True, one_hot_encoding_subclass=False):
"""
Add information form the ICM files to the data frame
:param one_hot_encoding_subclass: if one hot encoding should be applied to subclass or not
:param data_frame: data frame that is being pre-processed for boosting
:param path: path to the folder containing the csv files
:param use_price: True if you wish to append price information, false otherwise
:param use_asset: True if you wish to append asset information, false otherwise
:param use_subclass: True if you wish to append subclass information, false otherwise
:return: pd.DataFrame containing the information
"""
print("Adding ICM information...")
data_frame = data_frame.copy()
df_price: pd.DataFrame = pd.read_csv(path + "data_ICM_price.csv")
df_asset: pd.DataFrame = pd.read_csv(path + "data_ICM_asset.csv")
df_subclass: pd.DataFrame = pd.read_csv(path + "data_ICM_sub_class.csv")
total_items = get_total_number_of_items()
total_items = np.arange(total_items)
if use_price:
# Handle missing values
item_present = df_price['row'].values
mask = np.in1d(total_items, item_present, invert=True)
missing_items = total_items[mask].astype(np.int32)
missing_val_filled = np.ones(missing_items.size) * df_price['data'].median()
missing = np.array([missing_items, missing_val_filled])
missing_df = pd.DataFrame(data=np.transpose(missing), columns=['row', 'data'])
df_price = df_price.append(missing_df, sort=False)
df_price = df_price.reset_index()
df_price = df_price[['row', 'data']]
# TODO remove outliers and add dummy variable
df_price = df_price.rename(columns={"data": "price"})
data_frame = pd.merge(data_frame, df_price, right_on="row", left_on="item_id")
data_frame = data_frame.drop(columns=['row'], inplace=False)
if use_asset:
# Handle missing values
item_present = df_asset['row'].values
mask = np.in1d(total_items, item_present, invert=True)
missing_items = total_items[mask].astype(np.int32)
missing_val_filled = np.ones(missing_items.size) * df_asset['data'].median()
missing = np.array([missing_items, missing_val_filled])
missing_df = pd.DataFrame(data=np.transpose(missing), columns=['row', 'data'])
df_asset = df_asset.append(missing_df, sort=False)
df_asset = df_asset.reset_index()
df_asset = df_asset[['row', 'data']]
# TODO remove outliers and add dummy variable
df_asset = df_asset.rename(columns={"data": "asset"})
data_frame = | pd.merge(data_frame, df_asset, right_on="row", left_on="item_id") | pandas.merge |
from copy import deepcopy
import numpy as np
import pandas as pd
import pytest
from etna.datasets import TSDataset
from etna.models import NaiveModel
from etna.transforms.missing_values import TimeSeriesImputerTransform
from etna.transforms.missing_values.imputation import _OneSegmentTimeSeriesImputerTransform
@pytest.fixture
def ts_nans_beginning(example_reg_tsds):
"""Example dataset with NaNs at the beginning."""
ts = deepcopy(example_reg_tsds)
# nans at the beginning (shouldn't be filled)
ts.loc[ts.index[:5], pd.IndexSlice["segment_1", "target"]] = np.NaN
# nans in the middle (should be filled)
ts.loc[ts.index[8], pd.IndexSlice["segment_1", "target"]] = np.NaN
ts.loc[ts.index[10], pd.IndexSlice["segment_2", "target"]] = np.NaN
ts.loc[ts.index[40], pd.IndexSlice["segment_2", "target"]] = np.NaN
return ts
def test_wrong_init_one_segment():
"""Check that imputer for one segment fails to init with wrong imputing strategy."""
with pytest.raises(ValueError):
_ = _OneSegmentTimeSeriesImputerTransform(
in_column="target", strategy="wrong_strategy", window=-1, seasonality=1, default_value=None
)
def test_wrong_init_two_segments(all_date_present_df_two_segments):
"""Check that imputer for two segments fails to fit_transform with wrong imputing strategy."""
with pytest.raises(ValueError):
_ = TimeSeriesImputerTransform(strategy="wrong_strategy")
@pytest.mark.smoke
@pytest.mark.parametrize("fill_strategy", ["mean", "zero", "running_mean", "forward_fill", "seasonal"])
def test_all_dates_present_impute(all_date_present_df: pd.DataFrame, fill_strategy: str):
"""Check that imputer does nothing with series without gaps."""
imputer = _OneSegmentTimeSeriesImputerTransform(
in_column="target", strategy=fill_strategy, window=-1, seasonality=1, default_value=None
)
result = imputer.fit_transform(all_date_present_df)
np.testing.assert_array_equal(all_date_present_df["target"], result["target"])
@pytest.mark.smoke
@pytest.mark.parametrize("fill_strategy", ["mean", "zero", "running_mean", "forward_fill", "seasonal"])
def test_all_dates_present_impute_two_segments(all_date_present_df_two_segments: pd.DataFrame, fill_strategy: str):
"""Check that imputer does nothing with series without gaps."""
imputer = TimeSeriesImputerTransform(strategy=fill_strategy)
result = imputer.fit_transform(all_date_present_df_two_segments)
for segment in result.columns.get_level_values("segment"):
np.testing.assert_array_equal(all_date_present_df_two_segments[segment]["target"], result[segment]["target"])
@pytest.mark.parametrize("fill_strategy", ["zero", "mean", "running_mean", "forward_fill", "seasonal"])
def test_all_missing_impute_fail(df_all_missing: pd.DataFrame, fill_strategy: str):
"""Check that imputer can't fill nans if all values are nans."""
imputer = _OneSegmentTimeSeriesImputerTransform(
in_column="target", strategy=fill_strategy, window=-1, seasonality=1, default_value=None
)
with pytest.raises(ValueError, match="Series hasn't non NaN values which means it is empty and can't be filled"):
_ = imputer.fit_transform(df_all_missing)
@pytest.mark.parametrize("fill_strategy", ["mean", "running_mean", "forward_fill", "seasonal"])
def test_all_missing_impute_fail_two_segments(df_all_missing_two_segments: pd.DataFrame, fill_strategy: str):
"""Check that imputer can't fill nans if all values are nans."""
imputer = TimeSeriesImputerTransform(strategy=fill_strategy)
with pytest.raises(ValueError, match="Series hasn't non NaN values which means it is empty and can't be filled"):
_ = imputer.fit_transform(df_all_missing_two_segments)
def test_one_missing_value_zero(df_with_missing_value_x_index: pd.DataFrame):
"""Check that imputer with zero-strategy works correctly in case of one missing value in data."""
df, idx = df_with_missing_value_x_index
imputer = _OneSegmentTimeSeriesImputerTransform(
in_column="target", strategy="zero", window=-1, seasonality=1, default_value=None
)
result = imputer.fit_transform(df)["target"]
assert result.loc[idx] == 0
assert not result.isna().any()
def test_range_missing_zero(df_with_missing_range_x_index: pd.DataFrame):
"""Check that imputer with zero-strategy works correctly in case of range of missing values in data."""
df, rng = df_with_missing_range_x_index
imputer = _OneSegmentTimeSeriesImputerTransform(
in_column="target", strategy="zero", window=-1, seasonality=1, default_value=None
)
result = imputer.fit_transform(df)["target"]
expected_series = | pd.Series(index=rng, data=[0 for _ in rng], name="target") | pandas.Series |
# -*- coding: utf-8 -*-
"""
Littlefinger
Performing calculations on personal financial transactions
Reports:
"""
import pandas as pd
import numpy as np
from datetime import datetime, timedelta
from dateutil.parser import parse
import seaborn as sns
import matplotlib.pyplot as plt
from pandas.tseries.offsets import *
pd.set_option('float_format', '{:.2f}'.format)
plt.style.use('seaborn-colorblind')
# Useful timeranges
now = datetime.now()
weekago = now - timedelta(7)
def tidyxacts(df):
"""
Fix formats of columns for transactions in dataframe df
Parameters
----------
df : Dataframe
Returns
-------
    None. Modifies df in place: parses 'Date' as datetime, coerces Inflow, Outflow
    and Net to numeric, adds Year, Month, Quarter and Week columns, and sets
    'Date' as the index.
"""
df['Date'] = pd.to_datetime(df['Date'])
df['Inflow'] = pd.to_numeric(df['Inflow'])
df['Outflow'] = pd.to_numeric(df['Outflow'])
df['Net'] = pd.to_numeric(df['Net'])
# df['Master Category'] = df['Master Category'].astype("category")
# df['Sub Category'] = df['Sub Category'].astype("category")
# df['Year'], df['Month'] = df['Date'].dt.year, df['Date'].dt.strftime('%b')
df['Year'], df['Month'] = df['Date'].dt.year, df['Date'].dt.month
df['Quarter'], df['Week'] = df['Date'].dt.quarter, df['Date'].dt.week
df.set_index(['Date'], inplace=True)
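# Usage sketch (toy transactions, not from the workbook; assumes the pandas version this
# module was written for, where Series.dt.week is still available): tidyxacts mutates the
# frame in place and leaves the date parts as plain columns.
def _demo_tidyxacts():
    toy = pd.DataFrame({"Date": ["2021-01-05", "2021-02-10"],
                        "Inflow": [0, 100], "Outflow": [25, 0], "Net": [-25, 100]})
    tidyxacts(toy)
    return toy[["Year", "Month", "Quarter", "Week"]]  # derived from the new datetime index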
def add_date_info(df):
"""
Adds columns for year, month, quarter, week number
"""
df['Year'], df['Month'] = df['Date'].dt.year, df['Date'].dt.strftime('%b')
df['Quarter'], df['Week'] = df['Date'].dt.quarter, df['Date'].dt.week
def top10expenses(df, starttime, endtime):
"""
Returns top 10 expenses between dates.
Makes a smaller list of relevant columns and filters df.
TODO
----
Add categorical listing to expense/income to avoid showing income
"""
cols = ['Account', 'Date', 'Payee', 'Master Category', 'Sub Category',
'Outflow', 'Type']
df2 = df[cols]
filtered = df2[(df2['Date'] >= starttime) & (df2['Date'] <= endtime) &
                   (df2['Type'] == 'Expense')]
return filtered.sort_values(by='Outflow', ascending=False).head(10)
def account_balances(df):
"""
Returns current account balances
"""
return pd.pivot_table(df, index=["Account"], values=["Net"],
aggfunc=np.sum).sort_values(by='Net')
def annualspends(df):
"""
Returns pivot table by year and master category.
"""
    return pd.pivot_table(df, index=['Master Category'], columns=['Year'],
values=["Net"], aggfunc=np.sum)
# Each account's balance
def accounttrend(df):
"""
Plot running total of each account over all time
"""
plt.figure()
grouped = df['Net'].groupby(df['Account'])
for account, net in grouped:
net.cumsum().plot(label=account, legend=True)
def funds_net_worth():
"""
    This function builds the combined fund price table and the cumulative holdings
    table needed to calculate the net worth of each fund over time
"""
funds = excel.parse('Investments')
prices = excel.parse('Funds')
funds.set_index(['Date'], inplace=True)
prices.set_index(['Date'], inplace=True)
# Prices at point of purchase
purchase_prices = funds.pivot(index='Date', columns='Company Code',
values='Price (GBP)')
# Price Table
# TODO - import from somewhere
price_table = prices.pivot(index='Date', columns='Fund', values='Price')
# Join prices
all_prices = pd.concat([price_table, purchase_prices]).sort_index()
all_prices.fillna(method='ffill', inplace=True)
# Cumulative Amount of Funds
fund_table = pd.pivot_table(funds, values=['Quantity'], aggfunc=np.sum,
index=['Date'], columns=['Company Code'],
fill_value=0).cumsum()
return all_prices, fund_table
def xact_type(df):
"""
Adds transfer, expense, income category column to df
"""
def categorise(row):
if row['Master Category'] == "Transfer":
return 'Transfer'
if row['Master Category'] == "Income":
return 'Income'
return 'Expense'
df['Type'] = df.apply(lambda row: categorise(row), axis=1)
"""
Extra things to play with
Groupby:
grouped = df.groupby(lambda x: x.year)
for year, group in grouped:
....: print (year)
....: print (group)
usexpenses = us.query('Type == ["Expense"]')
Last month date
lastmonth = now - DateOffset(months=1)
filter = str(lastmonth.year) + "-" + str(lastmonth.month)
df[filter]
Plot expenses graph:
usexpenses = us.query('Type == ["Expense"]').copy()
usexpenses.Net = usexpenses.Net * -1
usexpenses.groupby('Master Category')['Net'].sum().plot(kind="bar") ; plt.axhline(0, color='k')
Graph of stacked subcategoryies per master category
usexpenses.groupby(['Master Category', 'Sub Category'])['Net'].sum().unstack().plot(kind='bar', stacked=True)
summary = pd.pivot_table(us['2017'], index=['Master Category'], columns=['Month'], values=['Net'], aggfunc=np.sum, fill_value=0)
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
summary.columns = months[:now.month] # Renames to months based off current month
summary.loc['Total']= summary.sum() # adds total row at bottom
"""
def monthly_pivot(df):
pass
# Open Excel and parse worksheets
excel = | pd.ExcelFile("C:\\Users\\aowd\OneDrive - Chevron\\Special Projects\\littlefinger\\Money1.2.xlsx") | pandas.ExcelFile |
from django.core.files import temp
from django.shortcuts import render
from django.conf import settings
from django.http import HttpResponse
from django.core.files.storage import FileSystemStorage
from django.http import FileResponse
from django.views.static import serve
import xlsxwriter
import pdfkit
import csv
import numpy
#import required libraries
import pandas as pd
import pyexcel
import xlrd
from matplotlib import pylab
from matplotlib import collections as mc
from pylab import *
from pylev3 import Levenshtein
from matplotlib.ticker import PercentFormatter
from matplotlib import pyplot
import matplotlib.pyplot as plt
import PIL, PIL.Image
import os
try:
from StringIO import BytesIO
except ImportError:
from io import BytesIO
'''from google.colab import drive
drive.mount('/content/drive')'''
# Create your views here.
def welcome(request):
return HttpResponse("Welcome")
def ourResponse(request):
return HttpResponse("OUR RESPONSE")
def takeInput(request):
return render(request,'input.html')
def similarity(seq1, seq2):
l1 , l2 = len(seq1), len(seq2)
ldist = Levenshtein.wf(seq1, seq2)
return (1 - ldist/max(l1, l2))*100
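# Quick sanity check (illustrative sequences): two 6-letter strings differing in one
# position have a Levenshtein distance of 1, so similarity() reports (1 - 1/6) * 100.
def _demo_similarity():
    return similarity("GKSALT", "GKTALT")  # ~83.33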
def df_gdomain_counter(df):
df_count = df["ProteinID"].value_counts()
return df_count
def match(x, y, mm):
mismatch = 0
for i in range(len(x)):
if (x[i] == 'X' or x[i] == y[i]):
pass
else:
mismatch += 1
if (mismatch <= mm):
return True
else:
return False
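# Illustrative check of the pattern matcher: 'X' positions always match, and up to
# mm of the remaining positions may differ.
def _demo_match():
    assert match("GXXXXGK", "GADVGGK", 0)       # only wildcard positions differ
    assert not match("GXXXXGK", "GADVGGR", 0)   # one real mismatch, none allowed
    assert match("GXXXXGK", "GADVGGR", 1)       # one real mismatch, one allowed
    return True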
def shuffler(word):
word_to_scramble = list(word)
numpy.random.shuffle(word_to_scramble)
# O=seq= ''.join(seq_temp)
new_word = ''.join(word_to_scramble)
return new_word
def list_of_7mer_X(sevenmer):
x_data = []
for r1 in range(7):
x = list(sevenmer)
x[r1] = "X"
x = ''.join(x)
x_data.append(x)
return x_data
def performAlgo(request):
myfile = request.FILES['document']
print(myfile.name)
fs = FileSystemStorage()
'''fs.save(myfile.name, myfile)'''
workbook = xlsxwriter.Workbook('media/new.xlsx')
family = request.POST.get("input01")
outpath = "media/new.xlsx"
df1 = pd.read_excel(myfile)
df2 = df1
for i in range((df1.shape[0] - 1)):
A = df1.loc[i, "Sequence"]
B = df1.loc[(i + 1), "Sequence"]
percent_similarity = similarity(A, B)
if (percent_similarity >= 90):
df2 = df2.drop(df2[df2.Sequence == B].index)
df2.to_excel(outpath, index=False)
NumProteins = df2.shape[0]
def H(protein_id, protein, x1, x2, x3, x4, mm1, mm2, mm3, mm4, min13, min34, min45, max13, max34, max45):
pL1 = []
pL2 = []
pL3 = []
pL4 = []
L1 = []
L2 = []
L3 = []
L4 = []
for i in range(len(protein) - len(x1)):
if (match(x1, protein[i:i + len(x1)], mm1) == True):
# global L1
pL1 = pL1 + [i]
L1 = L1 + [protein[i:i + len(x1)]]
# print "L1 = ", pL1,L1
for j in range(len(protein) - len(x2)):
if (match(x2, protein[j:j + len(x2)], mm2) == True):
# global L2
pL2 = pL2 + [j]
L2 = L2 + [protein[j:j + len(x2)]]
# print "L2 = ", pL2,L2
for k in range(len(protein) - len(x3)):
if (match(x3, protein[k:k + len(x3)], mm3) == True):
# global L3
pL3 = pL3 + [k]
L3 = L3 + [protein[k:k + len(x3)]]
# print "L3 = ", pL3,L3
for l in range(len(protein) - len(x4)):
if (match(x4, protein[l:l + len(x4)], mm4) == True):
# global L3
pL4 = pL4 + [l]
L4 = L4 + [protein[l:l + len(x4)]]
candidates = []
for i in range(len(pL1)):
for j in range(len(pL2)):
for k in range(len(pL3)):
for l in range(len(pL4)):
if (min13 <= pL2[j] - pL1[i] <= max13 and min34 <= pL3[k] - pL2[j] <= max34 and min45 <=
pL4[l] - pL3[k] <= max45):
# if 80 <=pL2[j]-pL1[i] <= 120 and 40 <=pL3[k]- pL2[j] <= 80 and 20 <=pL4[l]- pL3[k] <= 80
a = L1[i]
a_pos = pL1[i]
b = L2[j]
b_pos = pL2[j]
c = L3[k]
c_pos = pL3[k]
d = L4[l]
d_pos = pL4[l]
candidates.append((protein_id, a, a_pos, b, b_pos, c, c_pos, d, d_pos))
return candidates
abc = []
l1 = []
inpath = "media/new.xlsx"
mismatch1 = int(request.POST.get("mismatch1"))
mismatch2 = int(request.POST.get("mismatch2"))
mismatch3 = int(request.POST.get("mismatch3"))
mismatch4 = int(request.POST.get("mismatch4"))
mismatch41 = mismatch4
x1 = request.POST.get("x1")
x2 = request.POST.get("x2")
x3 = request.POST.get("x3")
x4 = request.POST.get("x4")
Min_G1_G3 = int(request.POST.get("Min_G1_G3"))
Max_G1_G3 = int(request.POST.get("Max_G1_G3"))
Min_G3_G4 = int(request.POST.get("Min_G3_G4"))
Max_G3_G4 = int(request.POST.get("Max_G3_G4"))
Min_G4_G5 = int(request.POST.get("Min_G4_G5"))
Max_G4_G5 = int(request.POST.get("Max_G4_G5"))
workbook = xlsxwriter.Workbook('media/output_wo_bias.xlsx')
outpath = "media/output_wo_bias.xlsx"
df1 = pd.read_excel(inpath)
df2 = df1.set_index("Entry", drop=False)
protein = df2.loc[:, "Sequence"]
protein_id = df2.loc[:, "Entry"]
protein_id
for i in range(len(protein)):
l = H(protein_id[i], protein[i], x1, x2, x3, x4, mismatch1, mismatch2, mismatch3, mismatch4, Min_G1_G3,
Min_G3_G4, Min_G4_G5, Max_G1_G3, Max_G3_G4, Max_G4_G5)
l1.append(l)
abc = [item for sublist in l1 for item in sublist]
gdomains = pd.DataFrame(abc,
columns=['ProteinID', 'G1-box', 'Position', 'G3-box', 'Position.1', 'G4-box', 'Position.2',
'G5-box', 'Position.3'])
gdomains = gdomains[gdomains['ProteinID'].astype(bool)]
gdomains.head()
gdomains.to_excel(outpath, index=False)
abc = []
l1 = []
workbook = xlsxwriter.Workbook('media/SA_nomismatch.xlsx')
outpath = "media/SA_nomismatch.xlsx"
str1 = "XXX"
x41 = str1 + x4 + "X"
mismatch41 = 0
df1 = pd.read_excel(inpath)
df2 = df1.set_index("Entry", drop=False)
protein = df2.loc[:, "Sequence"]
protein_id = df2.loc[:, "Entry"]
#protein_id
for i in range(len(protein)):
l = H(protein_id[i], protein[i], x1, x2, x3, x41, mismatch1, mismatch2, mismatch3, mismatch41, Min_G1_G3,
Min_G3_G4, Min_G4_G5, Max_G1_G3, Max_G3_G4, Max_G4_G5)
l1.append(l)
abc = [item for sublist in l1 for item in sublist]
gdomains = pd.DataFrame(abc,
columns=['ProteinID', 'G1-box', 'Position', 'G3-box', 'Position', 'G4-box', 'Position',
'G5-box', 'Position'])
gdomains = gdomains[gdomains['ProteinID'].astype(bool)]
gdomains.head()
gdomains.to_excel(outpath, index=False)
abc = []
l1 = []
workbook = xlsxwriter.Workbook('media/SA_mismatch.xlsx')
outpath = "media/SA_mismatch.xlsx"
df1 = | pd.read_excel(inpath) | pandas.read_excel |
from unittest.mock import patch
import featuretools as ft
import pandas as pd
import pytest
import woodwork as ww
from pandas.testing import assert_frame_equal
from woodwork.logical_types import (
Boolean,
Categorical,
Datetime,
Double,
Integer,
)
from blocktorch.pipelines.components import DFSTransformer
def test_index_errors(X_y_binary):
with pytest.raises(TypeError, match="Index provided must be string"):
DFSTransformer(index=0)
with pytest.raises(TypeError, match="Index provided must be string"):
DFSTransformer(index=None)
def test_numeric_columns(X_y_multi):
X, y = X_y_multi
X_pd = pd.DataFrame(X)
feature = DFSTransformer()
feature.fit(X_pd, y)
feature.transform(X_pd)
@patch("blocktorch.pipelines.components.transformers.preprocessing.featuretools.dfs")
@patch(
"blocktorch.pipelines.components.transformers.preprocessing.featuretools.calculate_feature_matrix"
)
def test_featuretools_index(mock_calculate_feature_matrix, mock_dfs, X_y_multi):
X, y = X_y_multi
X_pd = pd.DataFrame(X)
X_new_index = X_pd.copy()
index = [i for i in range(len(X))]
new_index = [i * 2 for i in index]
X_new_index["index"] = new_index
mock_calculate_feature_matrix.return_value = pd.DataFrame({})
# check if _make_entity_set keeps the intended index
feature = DFSTransformer()
feature.fit(X_new_index)
feature.transform(X_new_index)
arg_es = mock_dfs.call_args[1]["entityset"].entities[0].df["index"]
arg_tr = (
mock_calculate_feature_matrix.call_args[1]["entityset"].entities[0].df["index"]
)
assert arg_es.to_list() == new_index
assert arg_tr.to_list() == new_index
# check if _make_entity_set fills in the proper index values
feature.fit(X_pd)
feature.transform(X_pd)
arg_es = mock_dfs.call_args[1]["entityset"].entities[0].df["index"]
arg_tr = (
mock_calculate_feature_matrix.call_args[1]["entityset"].entities[0].df["index"]
)
assert arg_es.to_list() == index
assert arg_tr.to_list() == index
def test_transform(X_y_binary, X_y_multi, X_y_regression):
datasets = locals()
for dataset in datasets.values():
X, y = dataset
X_pd = pd.DataFrame(X)
X_pd.columns = X_pd.columns.astype(str)
es = ft.EntitySet()
es = es.entity_from_dataframe(
entity_id="X", dataframe=X_pd, index="index", make_index=True
)
feature_matrix, features = ft.dfs(entityset=es, target_entity="X")
feature = DFSTransformer()
feature.fit(X)
X_t = feature.transform(X)
assert_frame_equal(feature_matrix, X_t)
assert features == feature.features
feature.fit(X, y)
feature.transform(X)
X_pd.ww.init()
feature.fit(X_pd)
feature.transform(X_pd)
def test_transform_subset(X_y_binary, X_y_multi, X_y_regression):
datasets = locals()
for dataset in datasets.values():
X, y = dataset
X_pd = pd.DataFrame(X)
X_pd.columns = X_pd.columns.astype(str)
X_fit = X_pd.iloc[: len(X) // 3]
X_transform = X_pd.iloc[len(X) // 3 :]
es = ft.EntitySet()
es = es.entity_from_dataframe(
entity_id="X", dataframe=X_transform, index="index", make_index=True
)
feature_matrix, features = ft.dfs(entityset=es, target_entity="X")
feature = DFSTransformer()
feature.fit(X_fit)
X_t = feature.transform(X_transform)
assert_frame_equal(feature_matrix, X_t)
@pytest.mark.parametrize(
"X_df",
[
pd.DataFrame(
pd.to_datetime(["20190902", "20200519", "20190607"], format="%Y%m%d")
),
pd.DataFrame(pd.Series([1, 2, 3], dtype="Int64")),
pd.DataFrame(pd.Series([1.0, 2.0, 3.0], dtype="float")),
pd.DataFrame(pd.Series(["a", "b", "a"], dtype="category")),
],
)
def test_ft_woodwork_custom_overrides_returned_by_components(X_df):
y = pd.Series([1, 2, 1])
override_types = [Integer, Double, Categorical, Datetime, Boolean]
for logical_type in override_types:
try:
X = X_df.copy()
X.ww.init(logical_types={0: logical_type})
except (ww.exceptions.TypeConversionError, ValueError):
continue
dft = DFSTransformer()
dft.fit(X, y)
transformed = dft.transform(X, y)
assert isinstance(transformed, pd.DataFrame)
if logical_type == Datetime:
assert {k: type(v) for k, v in transformed.ww.logical_types.items()} == {
"DAY(0)": Integer,
"MONTH(0)": Integer,
"WEEKDAY(0)": Integer,
"YEAR(0)": Integer,
}
else:
assert {k: type(v) for k, v in transformed.ww.logical_types.items()} == {
"0": logical_type
}
@patch("blocktorch.pipelines.components.transformers.preprocessing.featuretools.dfs")
def test_dfs_sets_max_depth_1(mock_dfs, X_y_multi):
X, y = X_y_multi
X_pd = | pd.DataFrame(X) | pandas.DataFrame |
from flask import Flask, g, jsonify, json, request
from flask_cors import CORS
import numpy as np
import os
import pandas as pd
import pysam
from scipy.cluster.hierarchy import linkage, to_tree
import zipfile
def genotype(gt: tuple) -> int:
"""Convert genotype tuple to dosage (0/1/2)"""
return None if gt == (None, None) else gt[0] + gt[1]
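# Dosage encoding sketch (illustrative tuples): REF/REF -> 0, REF/ALT -> 1, ALT/ALT -> 2,
# and a missing genotype stays None.
def _demo_genotype():
    return [genotype(gt) for gt in [(0, 0), (0, 1), (1, 1), (None, None)]]  # [0, 1, 2, None]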
def variant_record(variant_id, vcf):
"""Get record for one variant from VCF"""
chrom, pos = variant_id.split(":")
chrom = chrom.replace("chr", "")
pos = int(pos)
recs = list(vcf.fetch(chrom, pos - 1, pos, reopen=True))
assert len(recs) == 1, f"Genotype retrieval error: {variant_id}"
return recs[0]
def geno_matrix(ids, vcf):
"""Get genotype matrix for a list of SNPs
Assumes SNPs are in close proximity on a chromosome, e.g. in a cis-window.
"""
chrom = ids[0].split(":")[0].replace("chr", "")
pos = [int(x.split(":")[1]) for x in ids]
genos = {}
for rec in vcf.fetch(chrom, min(pos) - 1, max(pos) + 1):
if rec.id in ids:
genos[rec.id] = [genotype(rec.samples[s]["GT"]) for s in vcf.header.samples]
mat = np.array([genos[id] if id in genos else [None] * len(vcf.header.samples) for id in ids])
return mat
def get_newick(node, newick, parentdist, leaf_names):
"""Save dendrogram in Newick format
from https://stackoverflow.com/questions/28222179/save-dendrogram-to-newick-format/31878514#31878514
"""
if node.is_leaf():
return "%s:%g%s" % (leaf_names[node.id], parentdist - node.dist, newick)
if len(newick) > 0:
newick = "):%g%s" % (parentdist - node.dist, newick)
else:
newick = ");"
newick = get_newick(node.get_left(), newick, node.dist, leaf_names)
newick = get_newick(node.get_right(), ",%s" % (newick), node.dist, leaf_names)
newick = "(%s" % (newick)
return newick
def row_tree(d):
"""Get Newick representation of matrix for clustering"""
clust = linkage(d, method="average", optimal_ordering=True)
tree = to_tree(clust)
return get_newick(tree, "", tree.dist, d.index)
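# Usage sketch (toy matrix, illustrative row labels): row_tree clusters the rows of a
# DataFrame and returns the dendrogram as a Newick string whose leaves are the index labels.
def _demo_row_tree():
    import pandas as pd
    d = pd.DataFrame([[0.0, 0.0], [0.1, 0.0], [5.0, 5.0]],
                     index=["gene_a", "gene_b", "gene_c"])
    return row_tree(d)  # e.g. '(gene_c:...,(gene_a:...,gene_b:...):...);'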
def validate_genes(ids, genes):
"""Return valid gene IDs for a list of gene IDs/names"""
valid = []
for id in ids:
if id in genes.index:
valid.append(id)
else:
x = list(genes.loc[genes["geneSymbol"] == id, :].index)
if len(x) > 0:
valid.append(x[0])
else:
id2 = id[0].upper() + id[1:].lower()
x = list(genes.loc[genes["geneSymbol"] == id2, :].index)
if len(x) > 0:
valid.append(x[0])
return valid
def format_per_tissue_gene_info(info: list, tissues: list):
"""Collect per-tissue expression and eQTL indicators into a list"""
for gene in info:
gene["statusInTissue"] = []
for tissue in tissues:
item = {
"tissueSiteDetailId": tissue,
"expressed": gene["expr_" + tissue],
"tested": gene["tested_" + tissue],
"eqtl": gene["eqtl_" + tissue],
}
gene["statusInTissue"].append(item)
del gene["expr_" + tissue]
del gene["tested_" + tissue]
del gene["eqtl_" + tissue]
# def load_tpm(path):
# tpm = {}
# expr = pd.read_csv(path, sep="\t")
# samples = pd.read_csv("../data/ref/metadata.csv")
# samples = samples.loc[samples["QC_pass"] == "pass", :]
# expr = expr.loc[:, expr.columns.isin(samples["library"])]
# tis_conv = {"Acbc": "NAcc", "IL": "IL", "LHB": "LHb", "PL": "PL", "VoLo": "OFC"}
# tis = pd.Series([tis_conv[x.split("_")[1]] for x in expr.columns])
# for tissue in tis.unique():
# tpm[tissue] = expr.loc[:, list(tis == tissue)]
# return tpm
def cis_pval(tissue, gene, variant):
"""Return nominal p-value for a given cis-window variant"""
with zipfile.ZipFile(f"../data/cis_pvals/{tissue}.zip", "r") as archive:
fname = f"{tissue}/{gene}.txt"
if fname in archive.namelist():
df = pd.read_csv(archive.open(fname), sep="\t", index_col="variant_id")
if variant in df.index:
return df.loc[variant, "pval_nominal"]
return None
def single_tissue(gene):
"""Return table of significant cis-eSNPs for a gene"""
with zipfile.ZipFile(f"../data/singleTissueEqtl.zip", "r") as archive:
fname = f"singleTissueEqtl/{gene}.txt"
if fname in archive.namelist():
d = pd.read_csv(archive.open(fname), sep="\t", dtype={"chromosome": str})
d["geneId"] = gene
return d
return None
tissueInfo = | pd.read_csv("../data/tissueInfo.txt", sep="\t") | pandas.read_csv |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
def _generate_counter(df_treat, counterfa):
"""
Combine estimated counterfactual outcomes
with previous true outcomes for the treated
unit.
"""
t_complete = df_treat.shape[0]
t_replace = counterfa.shape[0]
untreat = df_treat[:t_complete - t_replace]
np_syn = np.concatenate((untreat.values, counterfa.reshape(-1,1)), axis=0)
df_syn = | pd.DataFrame(np_syn) | pandas.DataFrame |
import os
import sys
import pytest
import numpy as np
import pandas as pd
sys.path.insert(0, os.path.abspath("../package"))
from utils import bmi_classify, load_data, parse_bmi_matrix, parse_bmi_native
def test_parse_bmi_native():
input_data = [{"Gender": "Female", "HeightCm": 167, "WeightKg": 82}]
expected_data = ([{'Gender': 'Female',
'HeightCm': 167,
'WeightKg': 82,
'bmi': 29.402273297715947,
'BMI Category': 'Overweight',
'Health risk': 'Enhanced risk'}],
1)
output = parse_bmi_native(input_data)
assert expected_data == output
def test_parse_bmi_matrix():
input_data = | pd.DataFrame([{"Gender": "Female", "HeightCm": 167, "WeightKg": 82}]) | pandas.DataFrame |
import pandas as pd
class QueryDataLake:
def retrieve_data_as_data_frame(self, dataDesc):
if "query" in dataDesc:
print("Query to data lake not implemented returning default values. Query: " + dataDesc["query"])
return pd.read_csv('../resources/dataset.csv', delimiter=';', index_col=0)
if "expression" in dataDesc:
expression_splitted = dataDesc["expression"].split()
if len(expression_splitted) == 1:
for element in dataDesc["data"]:
delete = [key for key in element if key != expression_splitted[0]]
for key in delete: del element[key]
else:
for element in dataDesc["data"]:
result = []
for x in range(0, len(element[expression_splitted[0]])):
if expression_splitted[1] == '*':
result.append(element[expression_splitted[0]][x] * element[expression_splitted[2]][x])
elif expression_splitted[1] == '/':
result.append(element[expression_splitted[0]][x] / element[expression_splitted[2]][x])
elif expression_splitted[1] == '+':
result.append(element[expression_splitted[0]][x] + element[expression_splitted[2]][x])
elif expression_splitted[1] == '-':
result.append(element[expression_splitted[0]][x] - element[expression_splitted[2]][x])
element["result"] = result
for element in dataDesc["data"]:
delete = [key for key in element if key != "result"]
for key in delete: del element[key]
if len(dataDesc["data"]) == 1:
df_response = pd.DataFrame.from_dict(dataDesc["data"][0])
else:
expression_splitted = dataDesc["expression"].split()
if len(expression_splitted) == 1:
df_response = [dataDesc["data"][0][expression_splitted[0]]]
for x in range(1, len(dataDesc["data"])):
df_response = pd.np.concatenate((df_response, [dataDesc["data"][x][expression_splitted[0]]]))
else:
df_response = [dataDesc["data"][0]["result"]]
for x in range(1, len(dataDesc["data"])):
df_response = | pd.np.concatenate((df_response, [dataDesc["data"][x]["result"]])) | pandas.np.concatenate |
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
def get_transformed_spatial_coordinates(filename: str):
df = pd.read_csv(filename, sep="\t")
spatial_data = df.iloc[:, 0]
spatial_xy = []
for spot in spatial_data:
coordinates = spot.split('x')
coordinates = [float(i) for i in coordinates]
spatial_xy.append(coordinates)
xy_coordinates = pd.DataFrame(spatial_xy, columns=['x', 'y'])
# transform image
x_scale = 288.9
y_scale = 292.6
x_shift = -288.9
y_shift = -292.6
xy_coordinates['x'] = xy_coordinates['x'] * x_scale + x_shift
xy_coordinates['y'] = xy_coordinates['y'] * y_scale + y_shift
return xy_coordinates
def label_components(pca, bound):
components = pca.to_numpy(copy=True)
for i in range(len(pca)):
if components[i] < bound:
components[i] = 1
else:
components[i] = 0
pca_components = pd.DataFrame(components).astype(int)
return pca_components
def match_labels(pred_labels, true_labels):
"""
true and pred labels are both 0, 1. Analyzes and matches which label in true corresponds to which label in true.
Then adjusts labels so they align.
:param true_labels: Set of true labels as column data frame
:param pred_labels: Set of pred labels as column data frame
:return: correctly labeled true and predicted labels as lists
"""
true_np = true_labels.to_numpy(copy=True)
pred_np = pred_labels.to_numpy(copy=True)
# count number of 0-0 and 1-1 matches
same_count = 0
for i in range(len(true_np)):
if true_np[i] == pred_np[i]:
same_count += 1
# If over half are 0-0 and 1-1 labels, its probably correct. Otherwise, swap 0 and 1 in pred
if same_count < (len(true_np) / 2):
for i in range(len(pred_np)):
if pred_np[i] == 1:
pred_np[i] = 0
else:
pred_np[i] = 1
return np.transpose(pred_np).flatten(), np.transpose(true_np).flatten()
def percent_dropout(filename: str):
"""
:param filename: file containing spatial transcriptomics data
:return: percent dropout component as a pandas dataframe
"""
df = | pd.read_csv(filename, sep="\t") | pandas.read_csv |
import numpy as np
import pandas as pd
import statsmodels.api as sm
from sklearn.preprocessing import OneHotEncoder
import statistics
import math
import sys
import itertools
import time
np.seterr(over='raise', under="ignore")
def batch_pp(df, covariates, batch_column, ignore):
"""This function takes in a df, the name of the covariate columns, and the batch column
and it outputs a feature count matrix, feature zero inflation matrix,
batch dummy matrix (one hot vectors as rows), covariate matrix (concatenated one hot vectors )
(covariates coefficient matrix [X_ij], batch dummy matrix [X_batch],
the zero inflation matrix [I_ijk], and count matrix [Y])
NOTE: this df can be a combination of datasets, or an individual dataset"""
# df: [dataframe] input with rows as samples and columns as feature counts.
# should only have OTU names ,covariates, and batch_column in keyspace
# covariates: [List] of the covariates to retain and estimate betas for
# batch_column: [string] column that defines the batches in this dataframe
# ignore: [List] of column names to ignore
################################### Check proper input ###################################
if (batch_column not in df.keys()):
raise ValueError("Column name " + str(batch_column) + " not found")
if (not set(covariates) <= set(df.keys())):
raise ValueError("Covariate columns not found in dataframe")
################################### Turn batch column to one hot vector ###################################
# note: for all features, batch matrix and covariate matrix will be the same.
X_batch = pd.get_dummies(df[batch_column], drop_first=False)
################################### Turn covariate columns covariate matrix ###################################
# number of columns is the number of betas to estimate
X_cov = pd.get_dummies(df[covariates], drop_first=True)
intercept = [1 for _ in range(X_cov.shape[0])]
# adding intercept term
X_cov.insert(0, "intercept", intercept)
################################### Build the feature zero inflation matrix ###################################
# turn numbers to 1 and keep zeroes the way they are
otu_keys = df.keys().drop(ignore)
I = df[otu_keys].replace('0.0', False).astype(bool).replace(False, 0).replace(True, 1)
df_dict = {"X_cov": X_cov,
"X_batch": X_batch,
"I": I,
"Y": df[otu_keys],
"ignore": df[ignore]}
return df_dict
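# Shape sketch on toy data (illustrative, column names made up): X_batch is one-hot over
# batches, X_cov is intercept + dummy-coded covariates, and I binarizes the counts in Y.
def _demo_batch_pp():
    toy = pd.DataFrame({"otu1": [0, 3, 1], "otu2": [2, 0, 0],
                        "sex": ["m", "f", "m"], "study": ["A", "A", "B"]})
    out = batch_pp(toy, covariates=["sex"], batch_column="study", ignore=["sex", "study"])
    # out["X_batch"]: 3 x 2 one-hot (A, B); out["X_cov"]: intercept + sex_m dummy;
    # out["I"]: same shape as out["Y"], with 1 wherever a count is > 0.
    return out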
def reduce_batch_effects(Y, I, X_cov, X_batch, verbose=False):
"""This function takes in the output of batch_pp and does the feature-wise batch reduction"""
# INPUT:
# Y: matrix of feature counts with the columns as features and columns as sample counts as rows
# I: matrix of feature zero inflation (1s where values are >=1, 0s o.w.)
# X_cov: covariance matrix (this will give us the betas we need to estimate)
# X_batch: dummy matrix of batch values
# OUTPUT:
# corrected matrix
# merge the dummy variables for the covariates and also for the batch to get the whole design matrix
X_mat = pd.concat([X_cov, X_batch], axis=1).astype(float)
# type conversions and index storing
Y = Y.astype(float)
num_beta_cov = X_cov.shape[1]
num_beta_batch = X_batch.shape[1]
num_features = len(Y.keys())
num_samples = Y.shape[0]
Z = pd.DataFrame(index=Y.index, columns=Y.columns)
# for each of the features, we will calculate the batch reduction coefficients, then reduce the batch effects
count = 0
otu_names = list(Y.keys())
otu_names = [x for x in otu_names if Y[x][Y[x] > 0].count() > 2]
sigma_p_store = {}
beta_params_store = pd.DataFrame(columns=Y.columns, index=X_mat.columns)
beta_cov_store = pd.DataFrame(columns=Y.columns, index=X_cov.columns)
beta_batch_store = {}
start = time.time()
for p in otu_names:
# select only the feature as a row
y_ijp = Y[p]
y_store = Y[p] # storing the original column(unchanged)
I_ijp = I[p].astype(float)
if (count % 100 == 0 and verbose):
print("Estimating β_cov, β_batch, and σ_p for feature {}".format(count))
# --------- Estimate beta_p and beta_batch through OLS regression --------------
# ignore the keys with zero counts and only fit with non zero samples
fit_index = list(y_ijp.to_numpy().astype(float).nonzero()[0])
zero_index = list(set(range(num_samples)) - set(fit_index))
zero_keys = y_store.keys()[zero_index]
# use only non zero counts for index to fit our OLS
y_ijp = y_ijp.iloc[fit_index]
# y_ijp = y_ijp[fit_index] # PREVIOUS VERSION
X_design_mat = X_mat.iloc[fit_index, :]
X_cov_mat = X_cov.iloc[fit_index, :]
X_batch_mat = X_batch.iloc[fit_index, :]
# fit ols
model = sm.OLS(y_ijp, X_design_mat)
res = model.fit()
############# Calculate sigma_p using the standard deviation of previous regression ###########
residuals = y_ijp - X_cov_mat.dot(res.params[:num_beta_cov])
sigma_hat_p = statistics.stdev(residuals)
# store in feature keyed dictionary of standard deviations
sigma_p_store[p] = sigma_hat_p
# separate the beta cov from the beta batch
beta_params = res.params
beta_cov = res.params[:num_beta_cov]
beta_batch = res.params[num_beta_cov:]
# store list of beta parameters indexed by feature
beta_params_store[p] = beta_params
beta_cov_store[p] = beta_cov
beta_batch_store[p] = beta_batch
####################################### Calculate Z_ijp #######################################
z_ijp = (y_ijp - X_cov_mat.dot(res.params[:num_beta_cov])) / sigma_hat_p
Z[p] = z_ijp
count += 1
if count % 25 == 0:
end = time.time()
print('{}/{} completed in: {}s'.format(count, len(otu_names), round(end - start, 2)))
# ------------ LOOP END -----------------------------------------------------------------
end = time.time()
print('Total OLS time: {}s'.format(round(end - start, 2)))
Z = Z.fillna(0)
beta_params_store = beta_params_store.astype(float)
# return X_mat.dot(beta_params_store)
estimates = eb_estimator(X_batch, Z, sigma_p=sigma_p_store, X_add=X_cov.dot(beta_cov_store), verbose=verbose)
return estimates
def eb_estimator(X_batch, Z, sigma_p, X_add, max_itt=6000, verbose=False):
"""This function returns the empirical bayes estimates for gamma_star_p and delta_star_p
    as well as the standardized OTU counts"""
# X_batch: Batch effects dummy matrix (n x alpha) matrix
    # Z: Matrix of standardized data (n x p)
# sigma_p: Vec of OTU variances
# X_add: matrix to add back after parameter estimation
# max_itt: Maximum number of iterations until convergence
# smooth_delta: bool flag for whether or not we replace the 0 values in delta_i by 1
# Standardized matrix init
Z_out = | pd.DataFrame(index=Z.index, columns=Z.columns) | pandas.DataFrame |
"""
Functions for IMGW database analysis.
get_meteorological_data(interval, stations_kind, years_range, file_format_index=0,
file_format=None, specific_columns=None, keywords=None,
merge_split_stations=True, optimize_memory_usage=False)
get_file_formats(interval, stations_kind, file_format_index)
get_column_names(file_format)
search_for_keywords_in_columns(keywords, file_format=None)
get_urls(interval, stations_kind, years_range)
download_data(urls)
concatenate_data(downloaded_files_names, file_formats, specific_columns,
keywords, optimize_memory_usage, years_range)
"""
def get_file_formats(
interval, stations_kind, file_format_index
):
"""
Return the available file formats for the given 'interval' and 'stations_kind'
(different file formats contain different data).
Keyword arguments:
interval -- data interval from the IMGW database ('monthly', 'daily', 'prompt')
stations_kind -- stations' kind ('synop', 'climat', 'fall')
    file_format_index -- which element from the list of available formats will
    be returned. Usually two file formats are available, so valid values are
    0, 1 or 'all' (take every available format)
"""
if interval == 'monthly':
if stations_kind == 'synop':
available_files_formats = ['s_m_d', 's_m_t']
elif stations_kind == 'fall':
available_files_formats = ['o_m']
elif stations_kind == 'climat':
available_files_formats = ['k_m_d', 'k_m_t']
else:
raise ValueError(
"Invalid 'stations_kind' input. Available inputs: 'synop', 'fall', 'climat'.")
elif interval == 'daily':
if stations_kind == 'synop':
available_files_formats = ['s_d', 's_d_t']
elif stations_kind == 'fall':
available_files_formats = ['o_d']
elif stations_kind == 'climat':
available_files_formats = ['k_d', 'k_d_t']
else:
raise ValueError(
"Invalid 'stations_kind' input. Available inputs: 'synop', 'fall', 'climat'.")
elif interval == 'prompt':
if stations_kind == 'synop':
available_files_formats = ['s_t']
elif stations_kind == 'fall':
raise NotADirectoryError("There's no '/dane_meteorologiczne/terminowe/opad/' directory in IMGW database.")
elif stations_kind == 'climat':
available_files_formats = ['k_t']
else:
raise ValueError(
"Invalid 'stations_kind' input. Available inputs: 'synop', 'fall', 'climat'.")
else:
raise ValueError("Invalid 'interval' input. Available inputs: 'monthly', 'daily', 'prompt'.")
if file_format_index == 'all':
chosen_file_format = available_files_formats
elif file_format_index == 0 or file_format_index == 1:
try:
chosen_file_format = [available_files_formats[file_format_index]]
except IndexError:
if len(available_files_formats) == 1:
chosen_file_format = [available_files_formats[0]]
else:
raise Exception("Something's wrong with the file format.")
else:
raise ValueError(
"{} file formats for the given 'interval' and 'stations_kind' available. Use index of the file format or 'all' "
"for 'file_format_index' argument (if 'all', both file formats will be taken): {}".format(
len(available_files_formats), available_files_formats
)
)
return chosen_file_format
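# Usage sketch (illustrative): for monthly synoptic data both formats are available, so
# 'all' returns the full list while an index picks a single one.
def _demo_get_file_formats():
    assert get_file_formats('monthly', 'synop', 'all') == ['s_m_d', 's_m_t']
    assert get_file_formats('monthly', 'synop', 1) == ['s_m_t']
    assert get_file_formats('daily', 'fall', 0) == ['o_d']
    return True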
def get_column_names(file_format):
"""
Return the column names for the given file format.
Keyword arguments:
file_format -- IMGW database file format (e.g. 's_m_t'). Available file
formats: k_m_d, k_m_t, o_m, s_m_d, s_m_t, k_d, k_d_t, o_d, s_d, s_d_t, k_t,
s_t
"""
if file_format == 'k_m_d':
return ['Kod stacji', 'Nazwa stacji', 'Rok', 'Miesiąc', 'Absolutna temperatura maksymalna [°C]',
'Status pomiaru TMAX', 'Średnia temperatura maksymalna [°C]', 'Status pomiaru TMXS',
'Absolutna temperatura minimalna [°C]', 'Status pomiaru TMIN', 'Średnia temperatura minimalna [°C]',
'Status pomiaru TMNS', 'Średnia temperatura miesięczna [°C]', 'Status pomiaru STM',
'Minimalna temperatura przy gruncie [°C]', 'Status pomiaru TMNG', 'Miesieczna suma opadów [mm]',
'Status pomiaru SUMM', 'Maksymalna dobowa suma opadów [mm]', 'Status pomiaru OPMX',
'Pierwszy dzień wystapienia opadu maksymalnego', 'Ostatni dzień wystąpienia opadu maksymalnego',
'Maksymalna wysokość pokrywy śnieżnej [cm]', 'Status pomiaru PKSN', 'Liczba dni z pokrywą śnieżną',
'Liczba dni z opadem deszczu', 'Liczba dni z opadem śniegu']
elif file_format == 'k_m_t':
return ['Kod stacji', 'Nazwa stacji', 'Rok', 'Miesiąc', 'Średnia miesięczna temperatura [°C]',
'Status pomiaru TEMP', 'Średnia miesięczna wilgotność względna [%]', 'Status pomiaru WLGS',
'Średnia miesięczna prędkość wiatru [m/s]', 'Status pomiaru FWS',
'Średnie miesięczne zachmurzenie ogólne [oktanty]', 'Status pomiaru NOS']
elif file_format == 'o_m':
return ['Kod stacji', 'Nazwa stacji', 'Rok', 'Miesiąc', 'Miesięczna suma opadów [mm]', 'Status pomiaru SUMM',
'Liczba dni z opadem śniegu', 'Status pomiaru LDS', 'Opad maksymalny [mm]', 'Status pomiaru MAXO',
'Dzień pierwszy wystąpienia opadu maksymalnego', 'Dzień ostatni wystąpienia opadu maksymalnego',
'Liczba dni z pokrywą śnieżną', 'Status pomiaru LDPS']
elif file_format == 's_m_d':
return ['Kod stacji', 'Nazwa stacji', 'Rok', 'Miesiąc', 'Absolutna temperatura maksymalna [°C]',
'Status pomiaru TMAX', 'Średnia temperatura maksymalna [°C]', 'Status pomiaru TMXS',
'Absolutna temperatura minimalna [°C]', 'Status pomiaru TMIN', 'Średnia temperatura minimalna [°C]',
'Status pomiaru TMNS', 'Średnia temperatura miesięczna [°C]', 'Status pomiaru STM',
'Minimalna temperatura przy gruncie [°C]', 'Status pomiaru TMNG', 'Miesieczna suma opadów [mm]',
'Status pomiaru SUMM', 'Maksymalna dobowa suma opadów [mm]', 'Status pomiaru OPMX',
'Pierwszy dzień wystapienia opadu maksymalnego', 'Ostatni dzień wystąpienia opadu maksymalnego',
'Miesięczna suma usłonecznienia [godziny]', 'Status pomiaru SUUS',
'Maksymalna wysokość pokrywy śnieżnej [cm]', 'Status pomiaru PKSN', 'Liczba dni z pokrywą śnieżną',
'Status pomiaru PSDN', 'Liczba dni z opadem deszczu', 'Status pomiaru DESD',
'Liczba dni z opadem śniegu',
'Status pomiaru SNID', 'Liczba dni z opadem deszczu ze śniegiem', 'Status pomiaru DSND',
'Liczba dni z gradem', 'Status pomiaru GRDD', 'Liczba dni z mgłą', 'Status pomiaru MGLD',
'Liczba dni z zamgleniem', 'Status pomiaru ZAMD', 'Liczba dni z sadzią', 'Status pomiaru SADD',
'Liczba dni z gołoledzią', 'Status pomiaru GOLD', 'Liczba dni z zamiecią śnieżną niską',
'Status pomiaru ZAND', 'Liczba dni z zamiecią śnieżną wysoką', 'Status pomiaru ZAWD',
'Liczba dni ze zmętnieniem', 'Status pomiaru ZMED', 'Liczba dni z wiatrem >= 10m/s',
'Status pomiaru W10D', 'Liczba dni z wiatrem >15m/s', 'Status pomiaru W15D', 'Liczba dni z burzą',
'Status pomiaru BURD', 'Liczba dni z rosą', 'Status pomiaru ROSD', 'Liczba dni ze szronem',
'Status pomiaru SZRD']
elif file_format == 's_m_t':
return ['Kod stacji', 'Nazwa stacji', 'Rok', 'Miesiąc', 'Średnie miesięczne zachmurzenie ogólne [oktanty]',
'Status pomiaru NOS', 'Średnia miesięczna prędkość wiatru [m/s]', 'Status pomiaru FWS',
'Średnia miesięczna temperatura [°C]', 'Status pomiaru TEMP',
'Średnie miesięczne ciśnienie pary wodnej [hPa]', 'Status pomiaru CPW',
'Średnia miesięczna wilgotność względna [%]', 'Status pomiaru WLGS',
'Średnie miesięczne ciśnienie na poziomie stacji [hPa]', 'Status pomiaru PPPS',
'Średnie miesięczne ciśnienie na pozimie morza [hPa]', 'Status pomiaru PPPM', 'Suma opadu dzień [mm]',
'Status pomiaru WODZ', 'Suma opadu noc [mm]', 'Status pomiaru WONO']
elif file_format == 'k_d':
return ['Kod stacji', 'Nazwa stacji', 'Rok', 'Miesiąc', 'Dzień', 'Maksymalna temperatura dobowa [°C]',
'Status pomiaru TMAX', 'Minimalna temperatura dobowa [°C]', 'Status pomiaru TMIN',
'Średnia temperatura dobowa [°C]', 'Status pomiaru STD', 'Temperatura minimalna przy gruncie [°C]',
'Status pomiaru TMNG', 'Suma dobowa opadów [mm]', 'Status pomiaru SMDB', 'Rodzaj opadu [S/W/ ]',
'Wysokość pokrywy śnieżnej [cm]', 'Status pomiaru PKSN']
elif file_format == 'k_d_t':
return ['Kod stacji', 'Nazwa stacji', 'Rok', 'Miesiąc', 'Dzień', 'Średnia dobowa temperatura [°C]',
'Status pomiaru TEMP', 'Średnia dobowa wilgotność względna [%]', 'Status pomiaru WLGS',
'Średnia dobowa prędkość wiatru [m/s]', 'Status pomiaru FWS',
'Średnie dobowe zachmurzenie ogólne [oktanty]', 'Status pomiaru NOS']
elif file_format == 'o_d':
return ['Kod stacji', 'Nazwa stacji', 'Rok', 'Miesiąc', 'Dzień', 'Suma dobowa opadów [mm]',
'Status pomiaru SMDB', 'Rodzaj opadu [S/W/ ]', 'Wysokość pokrywy śnieżnej [cm]', 'Status pomiaru PKSN',
'Wysokość świeżo spadłego śniegu [cm]', 'Status pomiaru HSS', 'Gatunek śniegu [kod]',
'Status pomiaru GATS', 'Rodzaj pokrywy śnieżnej [kod]', 'Status pomiaru RPSN']
elif file_format == 's_d':
return ['Kod stacji', 'Nazwa stacji', 'Rok', 'Miesiąc', 'Dzień', 'Maksymalna temperatura dobowa [°C]',
'Status pomiaru TMAX', 'Minimalna temperatura dobowa [°C]', 'Status pomiaru TMIN',
'Średnia temperatura dobowa [°C]', 'Status pomiaru STD', 'Temperatura minimalna przy gruncie [°C]',
'Status pomiaru TMNG', 'Suma dobowa opadu [mm]', 'Status pomiaru SMDB', 'Rodzaj opadu [S/W/ ]',
'Wysokość pokrywy śnieżnej [cm]', 'Status pomiaru PKSN', 'Równoważnik wodny śniegu [mm/cm]',
'Status pomiaru RWSN', 'Usłonecznienie [godziny]', 'Status pomiaru USL',
'Czas trwania opadu deszczu [godziny]', 'Status pomiaru DESZ', 'Czas trwania opadu śniegu [godziny]',
'Status pomiaru SNEG', 'Czas trwania opadu deszczu ze śniegiem [godziny]', 'Status pomiaru DISN',
'Czas trwania gradu [godziny]', 'Status pomiaru GRAD', 'Czas trwania mgły [godziny]',
'Status pomiaru MGLA', 'Czas trwania zamglenia [godziny]', 'Status pomiaru ZMGL',
'Czas trwania sadzi [godziny]', 'Status pomiaru SADZ', 'Czas trwania gołoledzi [godziny]',
'Status pomiaru GOLO', 'Czas trwania zamieci śnieżnej niskiej [godziny]', 'Status pomiaru ZMNI',
'Czas trwania zamieci śnieżnej wysokiej [godziny]', 'Status pomiaru ZMWS',
'Czas trwania zmętnienia [godziny]', 'Status pomiaru ZMET', 'Czas trwania wiatru >=10m/s [godziny]',
'Status pomiaru FF10', 'Czas trwania wiatru >15m/s [godziny]', 'Status pomiaru FF15',
'Czas trwania burzy [godziny]', 'Status pomiaru BRZA', 'Czas trwania rosy [godziny]',
'Status pomiaru ROSA', 'Czas trwania szronu [godziny]', 'Status pomiaru SZRO',
'Wystąpienie pokrywy śnieżnej [0/1]', 'Status pomiaru DZPS', 'Wystąpienie błyskawicy [0/1]',
'Status pomiaru DZBL', 'Stan gruntu [Z/R]', 'Izoterma dolna [cm]', 'Status pomiaru IZD',
'Izoterma górna [cm]', 'Status pomiaru IZG', 'Aktynometria [J/cm2]', 'Status pomiaru AKTN']
elif file_format == 's_d_t':
return ['Kod stacji', 'Nazwa stacji', 'Rok', 'Miesiąc', 'Dzień', 'Średnie dobowe zachmurzenie ogólne [oktanty]',
'Status pomiaru NOS', 'Średnia dobowa prędkość wiatru [m/s]', 'Status pomiaru FWS',
'Średnia dobowa temperatura [°C]', 'Status pomiaru TEMP', 'Średnia dobowe ciśnienie pary wodnej [hPa]',
'Status pomiaru CPW', 'Średnia dobowa wilgotność względna [%]', 'Status pomiaru WLGS',
'Średnia dobowe ciśnienie na poziomie stacji [hPa]', 'Status pomiaru PPPS',
'Średnie dobowe ciśnienie na pozimie morza [hPa]', 'Status pomiaru PPPM', 'Suma opadu dzień [mm]',
'Status pomiaru WODZ', 'Suma opadu noc [mm]', 'Status pomiaru WONO']
elif file_format == 'k_t':
return ['Kod stacji', 'Nazwa stacji', 'Rok', 'Miesiąc', 'Dzień', 'Godzina', 'Temperatura powietrza [°C]',
'Status pomiaru TEMP', 'Temperatura termometru zwilżonego [°C]', 'Status pomiaru TTZW',
'Wskaźnik lodu [L/W]', 'Wskaźnik wentylacji [W/N]', 'Wilgotność względna [%]', 'Status pomiaru WLGW',
'Kod kierunku wiatru [kod]', 'Status pomiaru DKDK', 'Prędkość wiatru [m/s]', 'Status pomiaru FWR',
'Zachmurzenie ogólne [0-10 do dn.31.12.1988/oktanty od dn.01.01.1989]', 'Status pomiaru ZOGK',
'Widzialność [kod]', 'Status pomiaru WID']
elif file_format == 's_t':
return ['Kod stacji', 'Nazwa stacji', 'Rok', 'Miesiąc', 'Dzień', 'Godzina',
'Wysokość podstawy chmur CL CM szyfrowana [kod]', 'Status pomiaru HPOD',
'Wysokość podstawy niższej [m]',
'Status pomiaru HPON', 'Wysokość podstawy wyższej [m]', 'Status pomiaru HPOW',
'Wysokość podstawy tekstowy [opis]', 'Pomiar przyrzadem 1 (niższa) [P]',
'Pomiar przyrzadem 2 (wyższa) [P]',
'Widzialność [kod]', 'Status pomiaru WID', 'Widzialność operatora [m]', 'Status pomiaru WIDO',
'Widzialność automat [m]', 'Status pomiaru WIDA', 'Zachmurzenie ogólne [oktanty]', 'Status pomiaru NOG',
'Kierunek wiatru [°]', 'Status pomiaru KRWR', 'Prędkość wiatru [m/s]', 'Status pomiaru FWR',
'Poryw wiatru [m/s]', 'Status pomiaru PORW', 'Temperatura powietrza [°C]', 'Status pomiaru TEMP',
'Temperatura termometru zwilżonego [°C]', 'Status pomiaru TTZW', 'Wskaźnik wentylacji [W/N]',
'Wskaźnik lodu [L/W]', 'Ciśnienie pary wodnej [hPa]', 'Status pomiaru CPW', 'Wilgotność względna [%]',
'Status pomiaru WLGW', 'Temperatura punktu rosy [°C]', 'Status pomiaru TPTR',
'Ciśnienie na pozimie stacji [hPa]', 'Status pomiaru PPPS', 'Ciśnienie na poziomie morza [hPa]',
'Status pomiaru PPPM', 'Charakterystyka tendencji [kod]', 'Wartość tendencji [wartość]',
'Status pomiaru APP',
'Opad za 6 godzin [mm]', 'Status pomiaru WO6G', 'Rodzaj opadu za 6 godzin [kod]', 'Status pomiaru ROPT',
'Pogoda bieżąca [kod]', 'Pogoda ubiegła [kod]', 'Zachmurzenie niskie [oktanty]', 'Status pomiaru CLCM',
'Chmury CL [kod]', 'Status pomiaru CHCL', 'Chmury CL tekstem', 'Chmury CM [kod]', 'Status pomiaru CHCM',
'Chmury CM tekstem', 'Chmury CH [kod]', 'Status pomiaru CHCH', 'Chmury CH tekstem', 'Stan gruntu [kod]',
'Status pomiaru SGRN', 'Niedosyt wilgotności [hPa]', 'Status pomiaru DEFI', 'Usłonecznienie',
'Status pomiaru USLN', 'Wystąpienie rosy [0/1]', 'Status pomiaru ROSW',
'Poryw maksymalny za okres WW [m/s]', 'Status pomiaru PORK', 'Godzina wystąpienia porywu',
'Minuta wystąpienia porywu', 'Temperatura gruntu -5 [°C]', 'Status pomiaru TG05',
'Temperatura gruntu -10 [°C]', 'Status pomiaru TG10', 'Temperatura gruntu -20 [°C]',
'Status pomiaru TG20', 'Temperatura gruntu -50 [°C]', 'Status pomiaru TG50',
'Temperatura gruntu -100 [°C]',
'Status pomiaru TG100', 'Temperatura minimalna za 12 godzin [°C]', 'Status pomiaru TMIN',
'Temperatura maksymalna za 12 godzin [°C]', 'Status pomiaru TMAX',
'Temperatura minimalna przy gruncie za 12 godzin [°C]', 'Status pomiaru TGMI',
'Równoważnik wodny śniegu [mm/cm]', 'Status pomiaru RWSN', 'Wysokość pokrywy śnieżnej [cm]',
'Status pomiaru PKSN', 'Wysokość świeżo spadłego śniegu [cm]', 'Status pomiaru HSS',
'Wysokość śniegu na poletku [cm]', 'Status pomiaru GRSN', 'Gatunek śniegu [kod]',
'Ukształtowanie pokrywy [kod]', 'Wysokość próbki [cm]', 'Status pomiaru HPRO', 'Ciężar próbki [g]',
'Status pomiaru CIPR']
def search_for_keywords_in_columns(
keywords, file_format=None
):
"""
Search for the given keywords in the column names and return a dictionary with
the file formats in which the keywords were found.
Keyword arguments:
keywords -- keywords that will be looked for
file_format -- IMGW database file format, the columns of which will be
used to look for keywords. If 'file_format' is None, then every column from
every file will be taken and the function will show you exactly where the
keywords were found (default None)
"""
if type(keywords) == str:
keywords = [keywords]
if file_format is None:
intervals = ['monthly', 'daily', 'prompt']
stations_kinds = ['synop', 'climat', 'fall']
found_file_formats = {}
for interval in intervals:
for kind in stations_kinds:
if interval == 'prompt' and kind == 'fall':
continue
file_formats = get_file_formats(interval, kind, 'all')
found_file_formats['interval=' + str(interval) + ', ' + 'stations_kind=' + str(kind)] = file_formats
keywords_in_files = {}
if type(keywords) == list:
for interval_stkind, file_formats in found_file_formats.items():
for file in file_formats:
columns_names = get_column_names(file)
for name in columns_names:
for keyword in keywords:
if keyword.upper() in name.upper():
try:
keywords_in_files[str(interval_stkind) + ", " + "file_format=" + str(file)].append(
name)
except KeyError:
keywords_in_files[str(interval_stkind) + ", " + "file_format=" + str(file)] = [name]
else:
raise ValueError("Invalid input for 'keywords'. Use a list of strings or a single str.")
return keywords_in_files
else:
if type(file_format) == str:
keywords_in_columns = []
columns_names = get_column_names(file_format)
for name in columns_names:
if type(keywords) == list:
for keyword in keywords:
if keyword.upper() in name.upper():
keywords_in_columns.append(name)
else:
raise ValueError("Invalid input for 'keywords'. Use a list of strings or a single str.")
else:
raise ValueError("Invalid input for the 'file_format' argument. Use a single str.")
return keywords_in_columns
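# Illustrative usage sketch (added for clarity; not part of the original API).
# It only uses the helpers defined above; the keywords below are arbitrary examples.
def _example_keyword_search():
    """Show where temperature- and wind-related columns appear, then restrict
    the search to the daily synoptic file format 's_d_t'."""
    hits = search_for_keywords_in_columns(['temperatura', 'wiatr'])
    for location, columns in hits.items():
        print(location, '->', columns)
    # Search a single file format instead of the whole database layout.
    return search_for_keywords_in_columns('opad', file_format='s_d_t')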
def get_urls(
interval, stations_kind, years_range
):
"""
Return the urls to the IMGW database for the given 'interval', 'stations_kind'
and 'years_range'.
Keyword arguments:
interval -- data interval from the IMGW database (monthly, daily, prompt)
stations_kind -- stations' kind (synop, climat, fall)
years_range -- years range (e.g. range(1966, 2021))
"""
if interval == 'monthly':
interval = 'miesieczne/'
elif interval == 'daily':
interval = 'dobowe/'
elif interval == 'prompt':
interval = 'terminowe/'
if stations_kind == 'fall':
raise NotADirectoryError("There's no '/dane_meteorologiczne/terminowe/opad/' directory.")
else:
raise ValueError("Invalid 'interval' input. Available inputs: 'monthly', 'daily', 'prompt'.")
if stations_kind == 'synop':
stations_kind = 'synop/'
elif stations_kind == 'fall':
stations_kind = 'opad/'
elif stations_kind == 'climat':
stations_kind = 'klimat/'
else:
raise ValueError("Invalid 'stations_kind' input. Available inputs: 'synop', 'fall', 'climat'.")
years_endings = []
for year in years_range:
if year < 2001:
if year > 1995:
if years_endings.count('1996_2000/') == 0:
years_endings.append('1996_2000/')
elif year > 1990:
if years_endings.count('1991_1995/') == 0:
years_endings.append('1991_1995/')
elif year > 1985:
if years_endings.count('1986_1990/') == 0:
years_endings.append('1986_1990/')
elif year > 1980:
if years_endings.count('1981_1985/') == 0:
years_endings.append('1981_1985/')
elif year > 1975:
if years_endings.count('1976_1980/') == 0:
years_endings.append('1976_1980/')
elif year > 1970:
if years_endings.count('1971_1975/') == 0:
years_endings.append('1971_1975/')
elif year > 1965:
if years_endings.count('1966_1970/') == 0:
years_endings.append('1966_1970/')
elif year > 1959:
if years_endings.count('1960_1965/') == 0:
years_endings.append('1960_1965/')
else:
raise ValueError("No data for {}. Available years range: 1960-2021.".format(str(year)))
else:
years_endings.append(str(year) + '/')
urls = []
base_url = 'https://danepubliczne.imgw.pl/data/dane_pomiarowo_obserwacyjne/dane_meteorologiczne/'
for ending in years_endings:
url = base_url + interval + stations_kind + ending
urls.append(url)
return urls
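# Illustrative usage sketch (added for clarity; not part of the original API):
# build the download URLs for daily synoptic data covering 2018-2020.
def _example_build_urls():
    urls = get_urls('daily', 'synop', range(2018, 2021))
    for url in urls:
        print(url)
    return urls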
def download_data(urls):
"""
Download data from the IMGW database.
Keyword arguments:
urls -- urls for the data which is requested
"""
import requests
from bs4 import BeautifulSoup as bs
import zipfile
import io
files_reading_dir_path = str(__file__).replace('imgw.py', 'files_reading_folder')
for url in urls:
if urls.index(url) == 0:
print("Data download started... 0% done")
r = requests.get(url)
soup = bs(r.content, features="html.parser")
zip_file_paths = []
for element in soup.find_all('a'):
if '.zip' in element.get_text():
zip_file_paths.append(element.get_text())
for path in zip_file_paths:
zip_file = requests.get(url + path)
zip_file = zipfile.ZipFile(io.BytesIO(zip_file.content))
zip_file.extractall(files_reading_dir_path)
if (urls.index(url) + 1) % 5 == 0:
print("Downloading data... {}% done".format(round((urls.index(url) / len(urls)) * 100)))
print("Data downloaded! 100% done")
def concatenate_data(
downloaded_files_names, file_formats, specific_columns,
keywords, optimize_memory_usage, years_range,
merge_splitted_stations
):
"""
Merge tables from downloaded files and return them as one merged pd.DataFrame.
Keyword arguments:
downloaded_files_names -- list of downloaded file names
file_formats -- IMGW file formats included in downloaded files
specific_columns -- which columns will be taken for merge
keywords -- words which must be in the column name if the column is to be
merged
optimize_memory_usage -- reduce pd.DataFrame memory usage
years_range -- filter pd.DataFrame up to the given period
"""
import pandas as pd
import numpy as np
import os
if isinstance(file_formats, list) and len(file_formats) > 1:
raise ValueError(
f"""Invalid value for the 'file_format' argument. Data downloading is possible only for a single file.
{len(file_formats)} files given ({file_formats}).
"""
)
if isinstance(file_formats, str):
file_formats = [file_formats]
if isinstance(keywords, str):
keywords = [keywords]
    df = pd.DataFrame()
import os
from os import listdir
from os.path import isfile, join
import re
from path import Path
import numpy as np
import pandas as pd
from poor_trader import utils
from poor_trader.utils import quotes_range
from poor_trader.config import INDICATORS_OUTPUT_PATH
def _true_range(df_quotes, indices):
cur = df_quotes.iloc[indices[1]]
prev = df_quotes.iloc[indices[0]]
high, low, prev_close = cur.High, cur.Low, prev.Close
a = utils.roundn(high - low, 4)
b = utils.roundn(abs(high - prev_close), 4)
c = utils.roundn(abs(low - prev_close), 4)
return max(a, b, c)
def true_range(df_quotes):
df = pd.DataFrame(index=df_quotes.index)
df['n_index'] = range(len(df_quotes))
_trf = lambda x: _true_range(df_quotes, [int(i) for i in x])
df['true_range'] = df.n_index.rolling(2).apply(_trf)
return df.filter(like='true_range')
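# Illustrative usage sketch (added for clarity; not part of the original API):
# true_range() expects a quotes frame with High/Low/Close columns; the first
# row has no previous close, so its true range is NaN.
def _example_true_range():
    quotes = pd.DataFrame({'High': [10.0, 11.5, 12.0],
                           'Low': [9.0, 10.2, 11.1],
                           'Close': [9.5, 11.0, 11.8]})
    return true_range(quotes)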
def SMA(df_quotes, period, field='Close', symbol=None):
if symbol:
outpath = INDICATORS_OUTPUT_PATH / '{}/{}_{}_SMA_{}.pkl'.format(symbol, quotes_range(df_quotes), field, period)
if os.path.exists(outpath):
return pd.read_pickle(outpath)
    df = pd.DataFrame(index=df_quotes.index)
# -*- coding: utf-8 -*-
"""
This module holds functions processing the results
of an oemof.solph optimisation model, that are used by methods of the classes
`q100opt.scenario_tools.DistrictScenario` and
`q100opt.scenario_tools.ParetoFront`.
Please use this module with care. It is work in progress!
Contact: <NAME> <<EMAIL>>
SPDX-License-Identifier: MIT
"""
import logging
import numpy as np
import oemof.solph as solph
import pandas as pd
from oemof.solph import views
def analyse_emissions(results):
"""
Performs analysis of emissions.
Parameters
----------
results : dict
Results of oemof.solph Energysystem.
Returns
-------
dict : Table with detailed emission analysis,
containing 2 keys: 'summary' and 'sequences'.
"""
return analyse_flow_attribute(results, keyword='emission_factor')
def analyse_costs(results):
"""
Performs a cost analysis.
Parameters
----------
results : dict
Results of oemof.solph Energysystem.
Returns
-------
dict : Table with detailed cost summary,
containing 3 keys: 'capex', 'opex' and 'all'.
"""
costs = {
'capex': analyse_capex(results),
'opex': analyse_flow_attribute(results, keyword='variable_costs'),
}
capex = pd.concat({'capex': costs['capex']}, names=['cost_type'])
opex = pd.concat({'opex': costs['opex']['sum']}, names=['cost_type'])
all = pd.concat([capex, opex])
costs.update({'all': all})
return costs
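# Illustrative usage sketch (added for clarity; not part of the original API).
# It assumes a solved q100opt.DistrictScenario whose processed results are
# available under the attribute `results`, as described in the docstrings above.
def _example_cost_analysis(district_scenario):
    costs = analyse_costs(district_scenario.results)
    print(costs['capex'])        # investment costs per converter/storage
    print(costs['opex']['sum'])  # variable-cost summary per flow
    return costs['all']          # combined table with 'cost_type' index level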
def analyse_capex(results):
"""
Analysis and Summary of the investment costs of the EnergySystem.
Parameters
----------
results : q100opt.DistrictScenario.results
The results Dictionary of the District Scenario class
(a dictionary containing the processed oemof.solph.results
with the key 'main' and the oemof.solph parameters
with the key 'param'.)
Returns
-------
pd.DataFrame :
The table contains both the parameter and result value
of the Investment objects.
- Columns: 'ep_costs', 'offset', 'invest_value' and 'costs'
- Index:
            - First level: 'converter' or 'storage'
            (Converters are all flows coming from a solph.Transformer or
a solph.Source)
- Second level: Label of the corresponding oemof.solph component:
            in case of 'converter', the label from which the flow is coming.
in case of 'storage', the label of the GenericStorage.
"""
# energy converter units
df_converter = get_invest_converter_table(results)
df_converter['category'] = 'converter'
# energy storages units
df_storages = get_invest_storage_table(results)
df_storages['category'] = 'storage'
df_result = pd.concat([df_converter, df_storages])
df_result.index = pd.MultiIndex.from_frame(
df_result[['category', 'label']])
df_result.drop(df_result[['category', 'label']], axis=1, inplace=True)
return df_result
def get_invest_converter(results):
"""
Gets the keys of investment converter units of the energy system.
Only the flows from a solph.Transformer or a solph.Source are considered.
"""
return [
x for x in results.keys()
if hasattr(results[x]['scalars'], 'invest')
if isinstance(x[0], solph.Transformer) or isinstance(
x[0], solph.Source)
]
def get_invest_storages(results):
"""
Gets the investment storages of the energy system.
Only the investment of the solph.components.GenericStorage is considered,
    and not an investment in the in- or outflow.
"""
return [
x for x in results.keys()
if x[1] is None
if hasattr(results[x]['scalars'], 'invest')
if isinstance(x[0], solph.components.GenericStorage)
]
def get_invest_converter_table(results):
"""
Returns a table with a summary of investment flows of energy converter
    units. These are oemof.solph.Flows coming from a solph.Transformer or
a solph.Source.
Parameters
----------
results : q100opt.DistrictScenario.results
The results Dictionary of the District Scenario class
(a dictionary containing the processed oemof.solph.results
with the key 'main' and the oemof.solph parameters
with the key 'param'.)
Returns
-------
pd.DataFrame :
The table contains both the parameter and result value
of the Investment objects.
- Columns: 'label', 'ep_costs', 'offset', 'invest_value' and 'costs'
The 'label' column is the label of the corresponding
        oemof.solph.Transformer or Source, from which the flow is coming.
"""
converter_units = get_invest_converter(results['main'])
return get_invest_table(results, converter_units)
def get_invest_storage_table(results):
"""
Returns a table with a summary of investment flows of all
oemof.solph.components.GeneicStorage units.
results : q100opt.DistrictScenario.results
The results Dictionary of the District Scenario class
(a dictionary containing the processed oemof.solph.results
with the key 'main' and the oemof.solph parameters
with the key 'param'.)
Returns
-------
pd.DataFrame :
The table contains both the parameter and result value
of the Investment objects.
- Columns: 'label', 'ep_costs', 'offset', 'invest_value' and 'costs'
The 'label' column is the label of the corresponding oemof.solph
label, which is the label from which the flow is coming.
"""
storages = get_invest_storages(results['main'])
return get_invest_table(results, storages)
def get_invest_table(results, keys):
"""
Returns the investment data for a list of "results keys".
Parameters
----------
results : dict
oemof.solph results dictionary (results['main])
keys : list
Keys of flows and nodes
Returns
-------
pd.DataFrame :
The table contains both the parameter and result value
of the Investment objects.
- Columns: 'label', 'ep_costs', 'offset', 'invest_value' and 'costs'
The 'label' column is the label of the corresponding oemof.solph
label, which is the label from which the flow is coming.
"""
invest_lab = [x[0].label for x in keys]
df = pd.DataFrame(data=invest_lab, columns=['label'])
df['ep_costs'] = [results['param'][x]['scalars']['investment_ep_costs']
for x in keys]
df['offset'] = [results['param'][x]['scalars']['investment_offset']
for x in keys]
df['invest_value'] = [results['main'][x]['scalars']['invest']
for x in keys]
df['costs'] = df['invest_value'] * df['ep_costs'] + df[
'offset'] * np.sign(df['invest_value'])
return df
def analyse_flow_attribute(des_results, keyword='variable_costs'):
"""
Analysis and Summary of flow attribute keyword of the EnergySystem.
Parameters
----------
des_results : q100opt.DistrictScenario.results
The results Dictionary of the District Scenario class
(a dictionary containing the processed oemof.solph.results
with the key 'main' and the oemof.solph parameters
with the key 'param'.)
keyword : str
Keyword for that values are analyzed,
e.g. variable_costs or emission_factor.
Returns
-------
dict : All relevant data with variable_costs.
Keys of dictionary: 'summary' and 'sequences'.
"""
param = des_results['param']
results = des_results['main']
var_cost_flows = get_attr_flows(des_results, key=keyword)
df = pd.DataFrame(index=next(iter(results.values()))['sequences'].index)
len_index = len(df)
# define columns of result dataframe
if keyword == 'variable_costs':
key_product = 'costs'
elif keyword == 'emission_factor':
key_product = 'emissions'
else:
key_product = 'product'
for flow in var_cost_flows:
if isinstance(flow[0], solph.Source):
category = 'source'
label = flow[0].label
elif isinstance(flow[0], solph.Transformer):
category = 'converter'
label = flow[0].label
elif isinstance(flow[1], solph.Sink):
category = 'sink'
label = flow[1].label
else:
label = flow[0].label + '-' + flow[1].label
category = 'unknown'
logging.warning(
"Flow/Node category of {} not specified!".format(label)
)
if keyword in param[flow]['scalars'].keys():
df[(category, label, keyword)] = param[flow]['scalars'][keyword]
else:
df[(category, label, keyword)] = \
param[flow]['sequences'][keyword].values[:len_index]
# 2) get flow results
df[(category, label, 'flow')] = results[flow]["sequences"].values
# 3) calc a * b
df[(category, label, key_product)] = \
df[(category, label, keyword)] * df[(category, label, 'flow')]
df.columns = pd.MultiIndex.from_tuples(
list(df.columns), names=('category', 'label', 'value')
)
df.sort_index(axis=1, inplace=True)
df_sum = df.iloc[:, df.columns.isin(['flow', key_product], level=2)].sum()
df_summary = df_sum.unstack(level=2)
df_summary['var_' + key_product + '_av_flow'] = \
df_summary[key_product] / df_summary['flow']
df_mean = \
df.iloc[:, df.columns.get_level_values(2) == keyword].mean().unstack(
level=2).rename(columns={
keyword: 'var_' + key_product + '_av_param'})
df_summary = df_summary.join(df_mean)
return {'sum': df_summary,
'sequences': df}
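# Illustrative usage sketch (added for clarity; not part of the original API).
# Same assumption as above: `district_scenario.results` holds the processed
# oemof.solph results and parameters.
def _example_emission_analysis(district_scenario):
    emissions = analyse_flow_attribute(district_scenario.results,
                                       keyword='emission_factor')
    # 'sum' has one row per (category, label) with flow, emissions and the
    # average specific value; 'sequences' holds the underlying time series.
    return emissions['sum'], emissions['sequences']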
def get_attr_flows(results, key='variable_costs'):
"""
Return all flows of an EnergySystem for a given attribute,
which is not zero.
Parameters
----------
results : dict
Results dicionary of the oemof.solph optimisation including the
Parameters with key 'param'.
key : str
Returns
-------
list : List of flows, where a non zero attribute value is given either
at the 'scalars' or 'sequences'.
"""
param = results['param']
list_keys = list(param.keys())
var_scalars = [
x for x in list_keys
if key in param[x]['scalars'].keys()
if abs(param[x]['scalars'][key]) > 0
]
var_sequences = [
x for x in list_keys
if key in param[x]['sequences'].keys()
if abs(param[x]['sequences'][key].sum()) > 0
]
var_cost_flows = var_scalars + var_sequences
return var_cost_flows
def get_attr_flow_results(des_results, key='variable_costs'):
"""
Return the parameter and flow results for all flows of an EnergySystem
for a given attribute, which is not zero.
Parameters
----------
des_results : dict
Results of district energy system. Must have the keys: 'main', 'param'.
key : str
Flow attribute.
Returns
-------
pd.DataFrame : Multiindex DataFrame.
- Index : Timeindex of oemof.solph.EnergySystem.
        - First column index level: <from>-<to>, where from and to are the
labels of the Nodes.
- Second column index level:
- attribute parameter
- resulting flow value
- product of parameter and flow column
"""
attr_flows = get_attr_flows(des_results, key=key)
    param = des_results['param']
    results = des_results['main']
df = pd.DataFrame(index=next(iter(results.values()))['sequences'].index)
len_index = len(df)
for flow in attr_flows:
label = flow[0].label + '-' + flow[1].label
# 1) get parameters
if key in param[flow]['scalars'].keys():
df[(label, key)] = param[flow]['scalars'][key]
else:
df[(label, key)] = param[flow]['sequences'][key].values[:len_index]
# 2) get flow results
df[(label, 'flow')] = results[flow]["sequences"].values
# 3) calc a * b
if key == 'variable_costs':
key_product = 'costs'
elif key == 'emission_factor':
key_product = 'emissions'
else:
key_product = 'product'
df[(label, key_product)] = df[(label, key)] * df[(label, 'flow')]
df.columns = pd.MultiIndex.from_tuples(
list(df.columns), names=('from-to', 'value')
)
return df
def get_all_sequences(results):
"""..."""
d_node_types = {
'sink': solph.Sink,
'source': solph.Source,
'transformer': solph.Transformer,
'storage_flow': solph.GenericStorage,
}
l_df = []
for typ, solph_class in d_node_types.items():
group = {
k: v["sequences"]
for k, v in results.items()
if k[1] is not None
if isinstance(k[0], solph_class) or isinstance(k[1], solph_class)
}
df = views.convert_to_multiindex(group)
df_mi = df.columns.to_frame()
df_mi.reset_index(drop=True, inplace=True)
df_mi['from'] = [x.label for x in df_mi['from']]
df_mi['to'] = [x.label for x in df_mi['to']]
df_mi['type'] = typ
        df.columns = pd.MultiIndex.from_frame(df_mi[['type', 'from', 'to']])
from path_manager import get_models_path
from gensim.models.wrappers.ldamallet import malletmodel2ldamodel
import os
import json
import numpy as np
import pandas as pd
from gensim.corpora import Dictionary
from gensim.models.wrappers import LdaMallet
from sklearn.metrics import euclidean_distances
class LDAInferencer:
def __init__(
self, corpus_id, model_id
):
# We will use the 0 index convention
CORPUS_ID = corpus_id
MODEL_ID = model_id
self.corpus_id = corpus_id
self.model_id = model_id
self.corpus_part = '_'.join(model_id.split('_')[:-1])
self.num_topics = int(model_id.split('_')[-1])
self.models_path = get_models_path('LDA')
self.model_folder = os.path.join(
self.models_path, f'{CORPUS_ID}-{MODEL_ID}')
self.model_data_folder = os.path.join(self.model_folder, 'data')
self.model = LdaMallet.load(os.path.join(
self.model_data_folder, f'{CORPUS_ID}_lda_model_{MODEL_ID}.mallet.lda'))
self.gensim_model = malletmodel2ldamodel(
self.model, iterations=self.model.iterations)
self.g_dict = Dictionary()
self.g_dict.id2token = self.model.id2word
self.g_dict.token2id = {k: v for v, k in self.g_dict.id2token.items()}
self.normalized_topics = self.model.get_topics()
self.topics = self.model.word_topics
self.documents_topics = pd.read_csv(
os.path.join(self.model_data_folder,
f'doc_topics_{MODEL_ID}_with_details.csv'),
# header='', # Change to True if topic id should be present as the header
index_col=0 # Change to True if the uid should be present as the index
)
self.documents_topics.columns = self.documents_topics.columns.astype(
int)
self.normalized_topics_by_documents = self.documents_topics / \
self.documents_topics.sum()
self.normalized_documents_topics = self.documents_topics.div(
self.documents_topics.sum(axis=1), axis=0)
self.topic_composition_ranges = self._get_topic_composition_ranges()
def get_topic_share(self, topic_id, doc_ids, serialize=False):
if isinstance(doc_ids, str):
doc_ids = [doc_ids]
topic_share = self.normalized_topics_by_documents.reindex(doc_ids)[
topic_id].to_dict()
if serialize:
topic_share = json.dumps(topic_share)
return topic_share
def infer_topics(self, text, topn_topics=None, total_topic_score=None, serialize=False):
if isinstance(text, str):
text = text.split()
if len(text) == 1:
doc_topics = self.gensim_model.get_term_topics(
self.g_dict.token2id[text[0]])
else:
doc = self.g_dict.doc2bow(text)
doc_topics = self.gensim_model[doc]
found_topics = {i for i, v in doc_topics}
print(found_topics)
for i in range(self.model.num_topics):
if i not in found_topics:
doc_topics.append((i, 0))
doc_topics = pd.DataFrame(doc_topics, columns=['topic', 'score'])
doc_topics = doc_topics.sort_values('score', ascending=False)
if total_topic_score is not None:
tdoc_topics = doc_topics[doc_topics.score.cumsum(
) <= total_topic_score]
if tdoc_topics.empty:
doc_topics = doc_topics.head(1)
else:
doc_topics = tdoc_topics
if topn_topics is not None and doc_topics.shape[0] > topn_topics:
doc_topics = doc_topics.head(topn_topics)
# doc_topics['topic'] = doc_topics['topic'].astype(int)
doc_topics = doc_topics.to_dict('records')
if serialize:
doc_topics = json.dumps(doc_topics)
return doc_topics
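    # Illustrative usage sketch (the ids below are placeholders):
    #   lda = LDAInferencer(corpus_id='WB', model_id='ALL_50')
    #   lda.infer_topics('poverty reduction and economic growth', topn_topics=3)
    #   returns a list of {'topic': ..., 'score': ...} dicts sorted by score.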
def get_model_topic_words(self, topn_words=5, total_word_score=None, serialize=False):
payload = []
for topic_id in range(self.num_topics):
topic_words = self.get_topic_words(
topic_id,
topn_words=topn_words,
total_word_score=total_word_score
)
payload.append({'topic_id': topic_id, 'topic_words': topic_words})
if serialize:
payload = json.dumps(payload)
return payload
def get_topic_words(self, topic_id, topn_words=10, total_word_score=None, serialize=False):
topic_id = int(topic_id)
topic_words = pd.DataFrame(self.model.show_topic(
topic_id, topn=topn_words), columns=['word', 'score'])
topic_words = topic_words.sort_values('score', ascending=False)
if total_word_score is not None:
ttopic_words = topic_words[topic_words.score.cumsum(
) <= total_word_score]
if ttopic_words.empty:
topic_words = topic_words.head(1)
else:
topic_words = ttopic_words
if topn_words is not None and topic_words.shape[0] > topn_words:
topic_words = topic_words.head(topn_words)
topic_words = topic_words.to_dict('records')
if serialize:
topic_words = json.dumps(topic_words)
return topic_words
def get_doc_topic_words(self, text, topn_topics=10, topn_words=10, total_topic_score=1, total_word_score=1, serialize=False):
doc_topics = self.infer_topics(
text, topn_topics=topn_topics, total_topic_score=total_topic_score)
doc_topic_words = []
for dt in doc_topics:
topic = dt['topic']
topic_score = dt['score']
topic_words = self.get_topic_words(
topic, topn_words=topn_words, total_word_score=total_word_score)
topic_data = {'topic': topic, 'score': topic_score}
topic_data['words'] = topic_words
doc_topic_words.append(topic_data)
doc_topic_words = pd.DataFrame(doc_topic_words).to_dict('records')
if serialize:
doc_topic_words = json.dumps(doc_topic_words)
return doc_topic_words
def get_doc_topic_words_by_id(self, doc_id, topn_topics=10, topn_words=10, total_topic_score=1, total_word_score=1, serialize=False):
doc_topics = self.get_doc_topic_by_id(
doc_id, topn=topn_topics, serialize=False)
doc_topic_words = []
for dt in doc_topics:
topic = dt['topic']
topic_score = dt['score']
topic_words = self.get_topic_words(
topic, topn_words=topn_words, total_word_score=total_word_score)
topic_data = {'topic': topic, 'score': topic_score}
topic_data['words'] = topic_words
doc_topic_words.append(topic_data)
doc_topic_words = pd.DataFrame(doc_topic_words).to_dict('records')
if serialize:
doc_topic_words = json.dumps(doc_topic_words)
return doc_topic_words
def get_combined_doc_topic_words(self, text, topn_topics=None, topn_words=None, total_topic_score=0.8, total_word_score=0.2, serialize=False):
doc_topics = self.infer_topics(
text, topn_topics=topn_topics, total_topic_score=total_topic_score)
doc_topic_words = []
for dt in doc_topics:
topic = dt['topic']
topic_score = dt['score']
topic_words = self.get_topic_words(
topic, topn_words=topn_words, total_word_score=total_word_score)
for tw in topic_words:
word = tw['word']
word_score = tw['score']
doc_topic_words.append({
'topic': topic,
'word': word,
'topic_score': topic_score,
'word_score': word_score,
'score': topic_score * word_score
})
doc_topic_words = pd.DataFrame(doc_topic_words).sort_values(
'score', ascending=False).to_dict('records')
if serialize:
doc_topic_words = json.dumps(doc_topic_words)
return doc_topic_words
def get_doc_topic_by_id(self, doc_id, topn=None, serialize=False):
doc = self.documents_topics.loc[doc_id]
if doc.empty:
return []
# Just call is score for consistency
doc.name = 'score'
doc.index.name = 'topic'
doc = doc.sort_values(ascending=False) / doc.sum()
doc.index = doc.index.astype(int)
doc = doc.reset_index()
if topn is not None:
doc = doc.head(topn)
doc = doc.to_dict('records')
if serialize:
doc = json.dumps(doc)
return doc
def get_similar_documents(self, document, topn=10, return_data='id', return_similarity=False, duplicate_threshold=0.01, show_duplicates=True, serialize=False):
doc_topics = self.infer_topics(document)
doc_topics = pd.DataFrame(doc_topics).sort_values(
'topic').set_index('topic')
e_distance = euclidean_distances(doc_topics.score.values.reshape(
1, -1), self.normalized_documents_topics.values).flatten()
if not show_duplicates:
e_distance[e_distance <= duplicate_threshold] = np.inf
payload = []
for rank, top_sim_ix in enumerate(e_distance.argsort()[:topn], 1):
payload.append(
{'id': self.normalized_documents_topics.iloc[top_sim_ix].name, 'score': e_distance[top_sim_ix], 'rank': rank})
if serialize:
payload = pd.DataFrame(payload).to_json()
return payload
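    # Illustrative usage sketch: rank the closest documents to a new text by
    # Euclidean distance in topic space, skipping near-duplicates:
    #   lda.get_similar_documents(new_text, topn=10, show_duplicates=False)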
def get_similar_docs_by_id(self, doc_id, topn=10, return_data='id', return_similarity=False, duplicate_threshold=0.01, show_duplicates=True, serialize=False):
doc_topics = self.normalized_documents_topics.loc[doc_id].values.reshape(
1, -1)
e_distance = euclidean_distances(
doc_topics, self.normalized_documents_topics.values).flatten()
if not show_duplicates:
            e_distance[e_distance <= duplicate_threshold] = np.inf
payload = []
for rank, top_sim_ix in enumerate(e_distance.argsort()[:topn], 1):
payload.append(
{'id': self.normalized_documents_topics.iloc[top_sim_ix].name, 'score': e_distance[top_sim_ix], 'rank': rank})
if serialize:
payload = pd.DataFrame(payload).to_json()
return payload
def get_docs_by_topic_composition(self, topic_percentage, topn=10, return_data='id', closest_to_minimum=False, return_similarity=False, duplicate_threshold=0.01, show_duplicates=True, serialize=False):
'''
topic_percentage (dict): key (int) corresponds to topic id and value (float [0, 1]) corresponds to the expected topic percentage.
'''
        topic_percentage = pd.Series(topic_percentage)
# -*- coding:utf-8 -*-
"""
#====#====#====#====
# Project Name: RNN-SignalProcess
# File Name: TestSomeFunction
# Date: 3/1/18 7:45 PM
# Using IDE: PyCharm Community Edition
# From HomePage: https://github.com/DuFanXin/RNN
# Author: DuFanXin
# BlogPage: http://blog.csdn.net/qq_30239975
# E-mail: <EMAIL>
# Copyright (c) 2018, All Rights Reserved.
#====#====#====#====
"""
import tensorflow as tf
import argparse
import os
def write_img_to_tfrecords():
import numpy as np
import pandas as pd
type_to_num = {
'galaxy': 0,
'qso': 1,
'star': 2,
'unknown': 3
}
train_set_writer = tf.python_io.TFRecordWriter(os.path.join(FLAGS.data_dir, 'train_set.tfrecords'))
validation_set_writer = tf.python_io.TFRecordWriter(os.path.join(FLAGS.data_dir, 'validation_set.tfrecords'))
train_set = pd.read_csv(filepath_or_buffer=os.path.join(FLAGS.data_dir, 'train_set.csv'), header=0, sep=',')
# splite_merge_csv()
# print(train_set.head())
row_num = train_set.shape[0]
for index, row in train_set.iterrows():
# print(row['id'])
train_list = np.loadtxt(
os.path.join('../data/first_train_data_20180131', '%d.txt' % row['id']), delimiter=",", skiprows=0, dtype=np.float32)
example = tf.train.Example(features=tf.train.Features(feature={
'label': tf.train.Feature(int64_list=tf.train.Int64List(value=[type_to_num[row['type']]])),
'signal': tf.train.Feature(bytes_list=tf.train.BytesList(value=[train_list.tobytes()]))
}))
        train_set_writer.write(example.SerializeToString())  # serialize the example to a string
if index % 100 == 0:
print('Done train_set writing %.2f%%' % (index / row_num * 100))
train_set_writer.close()
print("Done train_set writing")
validation_set = pd.read_csv(filepath_or_buffer=os.path.join(FLAGS.data_dir, 'validation_set.csv'), header=0, sep=',')
# splite_merge_csv()
# print(validation_set.head())
row_num = validation_set.shape[0]
for index, row in validation_set.iterrows():
# print(row['type'])
validation_list = np.loadtxt(
os.path.join('../data/first_train_data_20180131', '%d.txt' % row['id']), delimiter=",", skiprows=0, dtype=np.float32)
example = tf.train.Example(features=tf.train.Features(feature={
'label': tf.train.Feature(int64_list=tf.train.Int64List(value=[type_to_num[row['type']]])),
'signal': tf.train.Feature(bytes_list=tf.train.BytesList(value=[validation_list.tobytes()]))
}))
        validation_set_writer.write(example.SerializeToString())  # serialize the example to a string
if index % 100 == 0:
print('Done validation_set writing %.2f%%' % (index / row_num * 100))
validation_set_writer.close()
print("Done validation_set writing")
def test():
import numpy as np
# import pandas as pd
type_to_num = {
'galaxy': 0,
'qso': 1,
'star': 2,
'unknown': 3
}
    train_set_writer = tf.python_io.TFRecordWriter(os.path.join(FLAGS.data_dir, 'train_set_test.tfrecords'))  # the file to be generated
train_list = np.loadtxt(os.path.join('../data', '%d.txt' % 696220), delimiter=",", skiprows=0, dtype=np.float32)
example = tf.train.Example(features=tf.train.Features(feature={
'label': tf.train.Feature(int64_list=tf.train.Int64List(value=[type_to_num['star']])),
'signal': tf.train.Feature(bytes_list=tf.train.BytesList(value=[train_list.tobytes()]))
}))
    train_set_writer.write(example.SerializeToString())  # serialize the example to a string
train_set_writer.close()
def read_image(file_queue):
reader = tf.TFRecordReader()
# key, value = reader.read(file_queue)
_, serialized_example = reader.read(file_queue)
features = tf.parse_single_example(
serialized_example,
features={
'label': tf.FixedLenFeature([], tf.int64),
'signal': tf.FixedLenFeature([], tf.string)
})
signal = tf.decode_raw(features['signal'], tf.float32)
# print('image ' + str(image))
# image = tf.reshape(image, [INPUT_IMG_WIDE, INPUT_IMG_HEIGHT, INPUT_IMG_CHANNEL])
# image = tf.image.convert_image_dtype(image, dtype=tf.float32)
# image = tf.image.resize_images(image, (IMG_HEIGHT, IMG_WIDE))
# signal = tf.cast(features['signal'], tf.float32)
signal = tf.reshape(signal, [2600, 1])
# label = tf.decode_raw(features['label'], tf.int64)
label = tf.cast(features['label'], tf.int32)
# label = tf.reshape(label, [OUTPUT_IMG_WIDE, OUTPUT_IMG_HEIGHT])
# label = tf.decode_raw(features['image_raw'], tf.uint8)
# print(label)
# label = tf.reshape(label, shape=[1, 4])
return signal, label
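def example_batch_input(batch_size=32):
    """Illustrative sketch (not part of the original training code): stack a
    shuffled batch pipeline on top of read_image() using the same TF1 queue
    runners the rest of this file relies on."""
    train_file_path = os.path.join(FLAGS.data_dir, 'train_set.tfrecords')
    file_queue = tf.train.string_input_producer([train_file_path], shuffle=True)
    signal, label = read_image(file_queue)
    signal_batch, label_batch = tf.train.shuffle_batch(
        [signal, label], batch_size=batch_size,
        capacity=1000 + 3 * batch_size, min_after_dequeue=1000)
    return signal_batch, label_batch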
def read_check_tfrecords():
train_file_path = os.path.join(FLAGS.data_dir, 'train_set.tfrecords')
train_image_filename_queue = tf.train.string_input_producer(
string_tensor=tf.train.match_filenames_once(train_file_path), num_epochs=1, shuffle=True)
train_images, train_labels = read_image(train_image_filename_queue)
# one_hot_labels = tf.to_float(tf.one_hot(indices=train_labels, depth=CLASS_NUM))
    with tf.Session() as sess:  # start a session
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
signal, label = sess.run([train_images, train_labels])
print(signal)
print(label)
# print(sess.run(one_hot_labels))
coord.request_stop()
coord.join(threads)
print("Done reading and checking")
def splite_merge_csv():
import pandas as pd
df = pd.read_csv(filepath_or_buffer='../data/first_train_index_20180131.csv', header=0, sep=',')
train_set = pd.DataFrame()
validate_set = pd.DataFrame()
# print(df.head())
grouped = df.groupby('type')
print(grouped.count())
for name, group in grouped:
if name == 'galaxy':
train_set = pd.concat([train_set, group[:5200]])
validate_set = pd.concat([validate_set, group[5200:]])
elif name == 'qso':
train_set = pd.concat([train_set, group[:1300]])
validate_set = pd.concat([validate_set, group[1300:]])
elif name == 'star':
train_set = pd.concat([train_set, group[:140000]])
validate_set = pd.concat([validate_set, group[140000:140969]])
elif name == 'unknown':
print(name)
train_set = pd.concat([train_set, group[:34000]])
validate_set = pd.concat([validate_set, group[34000:]])
print('train_set')
print(train_set.count(axis=0))
print('validate_set')
print(validate_set.count(axis=0))
train_set.sample(frac=1).to_csv(path_or_buf='../data/train_set.csv')
validate_set.sample(frac=1).to_csv(path_or_buf='../data/validation_set.csv')
print('Done splite and merge csv')
def write_csv():
import pandas as pd
num_to_type = {
0: 'galaxy',
1: 'qso',
2: 'star',
3: 'unknown'
}
type_to_num = {
'galaxy': 0,
'qso': 1,
'star': 2,
'unknown': 3
}
test_set = pd.read_csv(filepath_or_buffer=os.path.join(FLAGS.data_dir, 'test_set_test.csv'), header=0, sep=',')
test_arr = test_set.values
print(test_arr.shape)
# for index, row in test_set.iterrows():
# row['prediction'] = 1
test_arr[0][-1] = 11
    test_set = pd.DataFrame(data=test_arr, columns=['id', 'type', 'prediction'])
import random
import spotipy
import requests
import pandas as pd
from sklearn import metrics
from sklearn import preprocessing
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans
class Recommend:
'''
Arguments -
client_id - unique client ID
client_secret - unique secret key
username - unique Spotify username
'''
def __init__(self, client_id = None, client_secret = None, username = None):
self.client_id = client_id
self.client_secret = client_secret
self.username = username
self.url = 'https://api.spotify.com/v1/recommendations?'
self.market = 'US'
self.sp = spotipy.Spotify(auth = self.generate_token())
def generate_token(self):
post_response = requests.post('https://accounts.spotify.com/api/token', {
'grant_type': 'client_credentials',
'client_id': self.client_id,
'client_secret': self.client_secret,
})
post_respose_json = post_response.json()
token = post_respose_json['access_token']
return token
def print_response(self, query):
token = self.generate_token()
response = requests.get(query, headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"})
json_response = response.json()
try:
print('Recommendations:\n')
for i, j in enumerate(json_response['tracks']):
print(f"{i+1}) \"{j['name']}\" : {j['artists'][0]['name']}")
print()
except:
print(json_response)
def byArtistSpotify(self, artist = None, number = None):
if artist is None:
print('Enter the artist as a string argument\n')
if number is None:
number = 10
artist_result = self.sp.search(artist)
try:
seed_artists = artist_result['tracks']['items'][0]['artists'][0]['uri'][15:]
seed_genres = []
            seed_genres_entire = self.sp.artist(seed_artists)['genres']
            if len(seed_genres_entire) < 3:
                seed_genres = seed_genres_entire
            else:
                for i in seed_genres_entire[:3]:
                    seed_genres.append(i)
            seed_genres = ','.join(seed_genres)
            query = f'{self.url}limit={number}&market={self.market}&seed_genres={seed_genres}&seed_artists={seed_artists}'
print(self.print_response(query))
except:
print('Seed artists for given artist could not be generated\n')
def byTrackSpotify(self, track_URI = None, number = None):
if track_URI is None:
print('Enter the track_URI as a string argument\n')
if number is None:
number = 10
track_ID = track_URI.split(':')[2]
try:
meta = self.sp.track(track_ID)
artist = meta['album']['artists'][0]['name']
artist_result = self.sp.search(artist)
try:
seed_artists = artist_result['tracks']['items'][0]['artists'][0]['uri'][15:]
seed_genres = []
                seed_genres_entire = self.sp.artist(seed_artists)['genres']
                if len(seed_genres_entire) < 3:
                    seed_genres = seed_genres_entire
                else:
                    for i in seed_genres_entire[:3]:
                        seed_genres.append(i)
                seed_genres = ','.join(seed_genres)
                query = f'{self.url}limit={number}&market={self.market}&seed_genres={seed_genres}&seed_artists={seed_artists}&seed_tracks={track_ID}'
print(self.print_response(query))
except:
print('Seed artist for given track could not be generated\n')
except:
print('Recheck track_URI argument\n')
def byPlaylistSpotify(self, playlist_URL = None, number = None):
if number is None:
number = 10
if playlist_URL is None:
print('Recheck playlist_URL argument\n')
playlist_id = playlist_URL[34:]
df = pd.DataFrame(columns = ['Name', 'Album', 'Artist', 'Year', 'Duration', 'Danceability', 'Energy'])
track_ids = []
for i in self.sp.playlist(playlist_id)['tracks']['items']:
track_ids.append(i['track']['id'])
for i in track_ids:
meta = self.sp.track(i)
features = self.sp.audio_features(i)
track_dict = {
'Name' : meta['name'],
'Album' : meta['album']['name'],
'Artist' : meta['album']['artists'][0]['name'],
'Year' : meta['album']['release_date'][0:4],
'Duration' : meta['duration_ms'] * 0.001,
'Danceability' : features[0]['danceability'],
'Energy' : features[0]['energy']
}
df = df.append(track_dict, ignore_index = True, sort = False)
common_artist = self.sp.search(df['Artist'].value_counts().head(1))
seed_artists = common_artist['tracks']['items'][0]['artists'][0]['uri'][15:]
seed_genres = []
        seed_genres_entire = self.sp.artist(seed_artists)['genres']
        if len(seed_genres_entire) < 3:
            seed_genres = seed_genres_entire
        else:
            for i in seed_genres_entire[:3]:
                seed_genres.append(i)
        seed_genres = ','.join(seed_genres)
seed_tracks = random.choice(track_ids)
target_danceability = round(df['Danceability'].mean(), 1)
target_energy = round(df['Energy'].mean(), 1)
try:
query = f'{self.url}limit={number}&market={self.market}&seed_genres={seed_genres}'
query += f'&target_danceability={target_danceability}'
query += f'&target_energy={target_energy}'
query += f'&seed_artists={seed_artists}&seed_tracks={seed_tracks}'
print(self.print_response(query))
except:
print('Query could not be executed\n')
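    # Illustrative usage sketch (credentials and the playlist URL are placeholders):
    #   rec = Recommend(client_id='<id>', client_secret='<secret>', username='<user>')
    #   rec.byArtistSpotify('Radiohead', number=5)
    #   rec.byPlaylistSpotify('https://open.spotify.com/playlist/<playlist_id>', number=10)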
def byAudioFeaturesSpotify(self, target_acousticness = None, target_danceability = None, target_duration_ms = None, target_energy = None, target_instrumentalness = None, target_key = None, target_liveness = None, target_loudness = None, target_mode = None, target_popularity = None, target_speechiness = None, target_tempo = None, target_time_signature = None, target_valence = None, artist = None):
if artist is None:
print('Enter the artist as a string argument\n')
artist_result = self.sp.search(artist)
try:
seed_artists = artist_result['tracks']['items'][0]['artists'][0]['uri'][15:]
seed_genres = []
            seed_genres_entire = self.sp.artist(seed_artists)['genres']
            if len(seed_genres_entire) < 3:
                seed_genres = seed_genres_entire
            else:
                for i in seed_genres_entire[:3]:
                    seed_genres.append(i)
            seed_genres = ','.join(seed_genres)
            query = f'{self.url}limit={10}&market={self.market}&seed_genres={seed_genres}'
if target_acousticness is not None:
query += f'&target_acousticness={target_acousticness}'
if target_danceability is not None:
query += f'&target_danceability={target_danceability}'
            if target_duration_ms is not None:
                query += f'&target_duration_ms={target_duration_ms}'
            if target_energy is not None:
                query += f'&target_energy={target_energy}'
            if target_instrumentalness is not None:
                query += f'&target_instrumentalness={target_instrumentalness}'
            if target_key is not None:
                query += f'&target_key={target_key}'
            if target_liveness is not None:
                query += f'&target_liveness={target_liveness}'
            if target_loudness is not None:
                query += f'&target_loudness={target_loudness}'
            if target_mode is not None:
                query += f'&target_mode={target_mode}'
            if target_popularity is not None:
                query += f'&target_popularity={target_popularity}'
            if target_speechiness is not None:
                query += f'&target_speechiness={target_speechiness}'
            if target_tempo is not None:
                query += f'&target_tempo={target_tempo}'
            if target_time_signature is not None:
                query += f'&target_time_signature={target_time_signature}'
            if target_valence is not None:
                query += f'&target_valence={target_valence}'
query += f'&seed_artists={seed_artists}'
print(self.print_response(query))
except:
print('Seed artists for given artist could not be generated\n')
def byTrack(self, track_URL = None, number = None, query = None, cluster = None):
if track_URL is None:
print('Recheck track_URL argument\n')
track_ID = track_URL[31:].split('?')[0]
if number is None:
number = 10
if query is None and cluster is None:
print('Specify method of recommendation as boolean argument\n')
if query is True and cluster is True:
print('Specify single method of recommendation as boolean argument\n')
if query == True:
meta = self.sp.track(track_ID)
features = self.sp.audio_features(track_ID)
target_year = meta['album']['release_date'][0:4]
target_popularity = meta['popularity']
target_danceability = features[0]['danceability']
target_energy = features[0]['energy']
tracks_df = pd.read_csv('tracks.csv')
try:
                results = pd.DataFrame()
#import warnings
#warnings.filterwarnings('ignore')
import pandas as pd
import pickle
train=pd.read_csv('Train.csv')
test = pd.read_csv("Test.csv")
"""
Module to provide processing functions for ISS data.
"""
from pilates import wrds_module
import pandas as pd
import numpy as np
import os, zipfile, re
from codecs import open
from rapidfuzz import fuzz
class iss(wrds_module):
""" Class providing main processing methods for the ISS data.
One instance of this classs is automatically created and accessible from
any data object instance.
Args:
d (pilates.data): Instance of pilates.data object.
"""
def __init__(self, d):
wrds_module.__init__(self, d)
def cik_from_gvkey(self, data):
""" Returns ISS cik from Compustat gvkey.
"""
None
def gvkey_from_cik(self, data):
""" Returns COMPUSTAT gvkey from ISS cik.
"""
df_iss = self.open_data(self.companyfy, columns=['cik','companyname','ticker','cusip'])
df_comp = self.d.comp.open_data(self.d.comp.names, columns=['gvkey','conm','cik','tic','cusip'])
# Clean ISS companies names
for c in [',','.']:
df_iss['companyname'] = df_iss.companyname.str.replace(c,'',regex=False)
df_iss['companyname'] = df_iss.companyname.str.upper()
df_iss = df_iss.drop_duplicates()
df = df_iss
#############
# Using CIK #
#############
df = pd.merge(df, df_comp[['cik', 'gvkey']], how='left')
# Only 157 missing CIKs, so we stop here for now.
######################
# Merge to user data #
######################
df_gvkey = df[['cik','gvkey']].drop_duplicates().dropna()
dfin = pd.merge(data['cik'], df_gvkey, how='left')
# Return the gvkeys
dfin.index = data.index
return dfin.gvkey
def get_link_iss_execucomp(self):
####################################################
# Create a general match between Execucomp and ISS #
####################################################
fields_iss = ['cik', 'participantid', 'fullname', 'lastname', 'middlename', 'firstname']
fields_exe = ['gvkey', 'execid', 'exec_fullname', 'exec_lname', 'exec_mname', 'exec_fname']
dfiss = self.open_data(self.participantfy, fields_iss)
dfexe = self.d.execucomp.open_data(self.d.execucomp.anncomp, fields_exe)
dfiss = dfiss.drop_duplicates()
dfexe = dfexe.drop_duplicates()
# Add gvkey information
dfiss['gvkey'] = self.gvkey_from_cik(dfiss)
dfexe['lastname'] = dfexe.exec_lname
dfexe['middlename'] = dfexe.exec_mname
dfexe['firstname'] = dfexe.exec_fname
dfexe['fullname'] = dfexe.exec_fullname
##### Name cleanup #####
# Remove '.', ',' and all cap
for col in ['lastname', 'middlename', 'firstname', 'fullname']:
for char in [',', '.']:
dfiss[col] = dfiss[col].str.replace(char, '', regex=False)
dfexe[col] = dfexe[col].str.replace(char, '', regex=False)
dfiss[col] = dfiss[col].str.upper()
dfexe[col] = dfexe[col].str.upper()
##### Merge #####
### Start from most strict to more lax and keep 1-1 matches
dfkeeps = []
## For same firm ##
# For same (lastname, middlename, firstname)
# or same (lastname, firstname), allow for 1-N and N-1 matches
keys = [['lastname', 'middlename', 'firstname'], ['lastname', 'firstname']]
for key in keys:
            df = pd.merge(dfiss[key+['gvkey', 'participantid']], dfexe[key+['gvkey', 'execid']])
import pandas as pd
# read the csv
input_name = "heart.csv"
df = pd.read_csv(input_name, parse_dates=["dateTime"], infer_datetime_format=True)
"""
Created on Sun Sep 22 08:24:36 2019
@author: <NAME>
@contact: <EMAIL>
Structure outline:
HTML page with pandas read html parser
"""
import requests
import pandas as pd
from Market.exc_parser import exc_parser
from Market.gen_process_params import gen_proc_params
class DL_parser(exc_parser):
""" Shanghai exchange parser
"""
def __init__( self ):
self.__col_names = ["Dates","Code","Open","High","Low","Close","OPI","Vol"]
self.__exc_name = "DL"
self.__URL_TEMPL = "http://www.dce.com.cn/publicweb/quotesdata/dayQuotesCh.html"
self.__headers = { 'Content-Type': 'application/x-www-form-urlencoded',
'Cookie': 'JSESSIONID=34581314E8E6F047ABE7D22180DCE3A2; WMONID=-b8uBX4vHDi; Hm_lvt_a50228174de2a93aee654389576b60fb=1567732473,1568333912,1568936184,1569113640; Hm_lpvt_a50228174de2a93aee654389576b60fb=1569113660',
'Referer': 'http://www.dce.com.cn/publicweb/quotesdata/dayQuotesCh.html',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36'
}
self.__payload = { 'dayQuotes.variety': 'all',
'dayQuotes.trade_type': '0',
'year': 0,
'month':0,
'day': 0,
}
self.__name_map = {"豆一":"a","豆二":"b","乙二醇":"eg","焦煤":"jm","焦炭":"j",
"铁矿石":"i","聚氯乙烯":"pvc","聚丙烯":"pp","聚乙烯":"pe","豆粕":"m",
"豆油":"y","棕榈油":"p","鸡蛋":"jd","玉米淀粉":"cs","玉米":"c"}
self.__datas = []
def _get_URL_TEMP(self):
# update exchange url
return self.__URL_TEMPL
def _read_html_format(self,page,dates):
        df = pd.read_html(page, skiprows=0)
from calendar import month
import pandas as pd
import numpy as np
import os.path as path
import datetime
from datetime import datetime
from pathlib import Path, PurePath
# try:
# from urllib.parse import urlparse
# from urllib.request import request
# except ImportError:
# try:
# from urlparse import urlparse
# from urlrequest import request
# except ImportError:
# print('Sorry ran out of options')
import urllib.parse as urlparse
import urllib.request as request, json
def import_func(file_or_url):
"""Private function to use the file extension to determine
the Pandas import function to use"""
# Locate the text at the last period '.'
_, extn = path.splitext(file_or_url.lower())
extn_fns = { '.csv':pd.read_csv
, '.xls':pd.read_excel
, '.xlsx':pd.read_excel
, '.json':pd.read_json
, '.html':pd.read_html
}
return extn_fns[extn]
def is_valid_url(url):
"""Determines if the URL is valid or not."""
rslt = urlparse.urlparse(url)
is_url = all([rslt.scheme, rslt.netloc, rslt.path])
is_url_valid = False
if is_url:
try:
with request.urlopen(url) as resp:
is_url_valid = (resp.status == 200)
except Exception as exn:
is_url_valid = False
print(f'There was an error {exn}')
else:
is_url_valid = True
return is_url_valid
def Get_json(json_path:str):
if is_valid_url(json_path):
with request.urlopen(json_path) as url:
data = json.loads(url.read().decode())
if data["Response"]=="Success":
all_data = data
else:
print("NOT ABLE TO READ THE DATA!!!")
all_data = None
elif(path.exists(json_path)):
with open(json_path) as f:
data = json.load(f)
if data is not None:
all_data = data
else:
all_data = None
return all_data
def apply_index(df, idx=None):
"""Apply an index to a dataframe"""
right_type = type(idx) == list or type(idx) == str
idx_in_df = set(idx).issubset(set(df))
not_already_idxd = not (set(idx).issubset(df.index.names) or idx in df.index.names)
if right_type and idx_in_df and not_already_idxd:
df.set_index(keys=idx,inplace=True)
else:
# TODO: Fix this later, check if the index already exists
#print(f"Index not applied {idx} [Type {right_type}] [Subset {idx_in_df}]")
pass
return df
def CreateDataFrame(file_name, idx=None, remove_nulls=True):
"""Creates a DataFrame the way I like it."""
if type(file_name) == str:
file_name = file_name.strip()
elif file_name.issubclass(PurePath):
pass
state = []
apply_func = import_func(file_name)
if is_valid_url(file_name):
state.append('Valid url')
else:
if not path.exists(file_name):
print(f'Had problems locating the data [{file_name}]')
return pd.DataFrame()
# Import File based on file extension
df = apply_func(file_name, index_col=idx, parse_dates=True, infer_datetime_format=True)
state.append('imported')
if remove_nulls:
df.dropna(inplace=True)
print(state)
return df
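# Illustrative usage sketch (added for clarity; the file name and index columns
# below are placeholders for whatever source CreateDataFrame is pointed at).
def _example_create_dataframe():
    df = CreateDataFrame('data/prices.csv', idx=['Ticker', 'Date'])
    print(df.head())
    return df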
# Check if the object is a list or dictionary
Is_Iterable = lambda objX: ('__getitem__' in dir(objX) or '__iter__' in dir(objX))
# Split up the componets of a list of tuples
unzip_list = lambda zlst: list(zip(*zlst))
# Get the Quarter, based on the date
Qtr = lambda dtx: (dtx.month+2)//3
# Get the Last Month of the Qtr
Qtr_EOM = lambda dtx: Qtr(dtx) * 3
# Let's put it all together
def Last_Day_Qtr(dtx:datetime.date):
"""Get the last day of the month in a Given Qtr"""
Qtr_Last_Day_LU = {1:31,2:30,3:30,4:31}
yr = dtx.year
m = Qtr_EOM(dtx)
d = Qtr_Last_Day_LU[ Qtr(dtx) ]
return datetime(yr,m,d)
# Get the number of days from the End of Qtr to given date
Days_2_EoQ = lambda dtx: (Last_Day_Qtr(dtx) - dtx).days
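# Illustrative usage sketch (added for clarity; exercises the quarter helpers above).
def _example_quarter_helpers():
    d = datetime(2021, 5, 14)
    print(Qtr(d))           # 2 -> May belongs to Q2
    print(Qtr_EOM(d))       # 6 -> last month of Q2
    print(Last_Day_Qtr(d))  # 2021-06-30 00:00:00
    print(Days_2_EoQ(d))    # 47 days left in the quarter
    return Last_Day_Qtr(d)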
def Get_Qtr(xdate:datetime):
""" This function takes various datetime objects and returns the Quarter associated with the values passed in
So for an array, it will return an array of quarters as string. If a single value was passed the
a single quarter will be returned."""
from calendar import monthrange
import numpy as np
typ = type(xdate)
if typ is datetime or typ is datetime.date:
q = (xdate.month+2)//3
y = xdate.year
return (f"{y}Q{q}")
elif typ is np.ndarray:
lst_date = [Get_Qtr(x) for x in xdate]
return lst_date
elif typ is pd.Series or typ is list:
lst_date = [Get_Qtr(x[1]) for x in xdate.iteritems()]
return lst_date
elif typ is pd.core.indexes.multi.MultiIndex:
lst_date = [Get_Qtr(x[1].date()) for x in xdate.values]
return lst_date
elif typ is pd.core.indexes.datetimes.DatetimeIndex:
lst_date = [Get_Qtr(x.date()) for x in xdate]
return lst_date
elif typ is str:
return Get_Qtr(datetime.strptime(xdate, '%Y-%m-%d'))
else:
#print(f'The type is {type(xdate)} not planned for.')
d = xdate
q = (d.month+2)//3
y = d.year
return (f"{y}Q{q}")
def Get_Date(xdate:datetime=None):
""" This function takes various datetime objects and returns the Date associated with the values passed in.
So for an array, it will return an array of datetime dates. If a single value was passed the
a single date will be returned."""
if xdate is None:
return None
if 'mro' in (dir(xdate)):
print(f'{xdate.mro()}')
import numpy as np
import pandas as pd
typ = type(xdate)
if typ is datetime or typ is datetime.date:
return datetime(xdate.year,xdate.month,xdate.day)
elif (typ is np.ndarray):
lst_date = [Get_Date(x) for x in xdate]
return lst_date
elif (typ is pd._libs.tslibs.timestamps.Timestamp) and Is_Iterable(xdate):
#print('pd._libs.tslibs.timestamps.Timestamp ITERABLE')
return [Get_Date(x) for x in xdate]
elif (typ is pd._libs.tslibs.timestamps.Timestamp):
#print('pd._libs.tslibs.timestamps.Timestamp')
        return pd.to_datetime(xdate)
"""TSV Class"""
import re
import math
import pandas as pd
import random
import json
from omigo_core import tsvutils
from omigo_core import utils
import sys
class TSV:
"""This is the main data processing class to apply different filter and transformation functions
on tsv data and get the results. The design is more aligned with functional programming where
each step generates a new copy of the data"""
header = None
header_map = None
header_index_map = None
header_fields = None
data = None
# constructor
def __init__(self, header, data):
# initialize header and data
self.header = header
self.data = data
# create map of name->index and index->name
self.header_fields = self.header.split("\t")
self.header_map = {}
self.header_index_map = {}
# validation
for h in self.header_fields:
if (len(h) == 0):
raise Exception("Zero length header fields:" + str(self.header_fields))
# create hashmap
for i in range(len(self.header_fields)):
h = self.header_fields[i]
# validation
if (h in self.header_map.keys()):
raise Exception("Duplicate header key:" + str(self.header_fields))
self.header_map[h] = i
self.header_index_map[i] = h
# basic validation
if (len(data) > 0 and len(data[0].split("\t")) != len(self.header_fields)):
raise Exception("Header length is not matching with data length:", len(self.header_fields), len(data[0].split("\t")), str(self.header_fields), str(data[0].split("\t")))
# debugging
def to_string(self):
return "Header: {}, Data: {}".format(str(self.header_map), str(len(self.data)))
# check data format
def validate(self):
# data validation
count = 0
for line in self.data:
count = count + 1
fields = line.split("\t")
if (len(fields) != len(self.header_fields)):
raise Exception("Header length is not matching with data length. position: {}, len(header): {}, header: {}, len(fields): {}, fields: {}".format(
count, len(self.header_fields), self.header_fields, len(fields), str(fields)))
# return
return self
def __has_col__(self, col):
# validate xcol
return col in self.header_map.keys()
# cols is array of string
def select(self, col_or_cols, inherit_message = ""):
# get matching column and indexes
matching_cols = self.__get_matching_cols__(col_or_cols)
indexes = self.__get_col_indexes__(matching_cols)
# create new header
new_header = "\t".join(matching_cols)
# create new data
counter = 0
new_data = []
for line in self.data:
# report progress
counter = counter + 1
utils.report_progress("select: [1/1] selecting columns", inherit_message, counter, len(self.data))
fields = line.split("\t")
new_fields = []
for i in indexes:
if (i >= len(fields)):
raise Exception("Invalid index: ", col_or_cols, matching_cols, indexes, line, fields, len(fields), len(self.header_fields), self.header_map)
new_fields.append(fields[i])
new_data.append("\t".join(new_fields))
# return
return TSV(new_header, new_data)
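    # Illustrative usage sketch: keep two columns, then filter rows by value:
    #   tsv = TSV("name\tage\tcity", ["alice\t30\tparis", "bob\t41\tlondon"])
    #   tsv.select(["name", "age"]).eq_int("age", 30)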
def values_not_in(self, col, values, inherit_message = ""):
inherit_message2 = inherit_message + ": values_not_in" if (inherit_message != "") else "values_not_in"
return self.filter([col], lambda x: x not in values, inherit_message = inherit_message2)
def values_in(self, col, values, inherit_message = ""):
inherit_message2 = inherit_message + ": values_in" if (inherit_message != "") else "values_in"
return self.filter([col], lambda x: x in values, inherit_message = inherit_message2)
def not_match(self, col, pattern, inherit_message = ""):
utils.warn("Please use not_regex_match instead")
return self.not_regex_match(col, pattern, inherit_message)
def not_regex_match(self, col, pattern, inherit_message = ""):
inherit_message2 = inherit_message + ": not_regex_match" if (inherit_message != "") else "not_regex_match"
return self.regex_match(col, pattern, condition = False, inherit_message = inherit_message2)
def match(self, col, pattern, inherit_message = ""):
utils.warn("Please use regex_match instead")
return self.regex_match(col, pattern, inherit_message = inherit_message)
def regex_match(self, col, pattern, condition = True, inherit_message = ""):
inherit_message2 = inherit_message + ": regex_match" if (inherit_message != "") else "regex_match"
return self.filter([col], lambda x: (re.match(pattern, x) is not None) == condition, inherit_message = inherit_message2)
def not_eq(self, col, value, inherit_message = ""):
utils.warn("This api can have side effects because of implicit data types conversion in python. Use not_eq_int, not_eq_str or not_eq_float")
inherit_message2 = inherit_message + ": not_eq" if (inherit_message != "") else "not_eq"
return self.filter([col], lambda x: x != value, inherit_message = inherit_message2)
    def eq(self, col, value, inherit_message = ""):
        utils.warn("This api can have side effects because of implicit data types conversion in python. Use eq_int, eq_str or eq_float")
        inherit_message2 = inherit_message + ": eq" if (inherit_message != "") else "eq"
        return self.filter([col], lambda x: x == value, inherit_message = inherit_message2)
def eq_int(self, col, value, inherit_message = ""):
inherit_message2 = inherit_message + ": eq_int" if (inherit_message != "") else "eq_int"
return self.filter([col], lambda x: int(float(x)) == value, inherit_message = inherit_message2)
def eq_float(self, col, value, inherit_message = ""):
inherit_message2 = inherit_message + ": eq_float" if (inherit_message != "") else "eq_float"
return self.filter([col], lambda x: float(x) == value, inherit_message = inherit_message2)
def eq_str(self, col, value, inherit_message = ""):
inherit_message2 = inherit_message + ": eq_str" if (inherit_message != "") else "eq_str"
return self.filter([col], lambda x: str(x) == str(value), inherit_message = inherit_message2)
def not_eq_str(self, col, value, inherit_message = ""):
inherit_message2 = inherit_message + ": not_eq_str" if (inherit_message != "") else "not_eq_str"
return self.filter([col], lambda x: str(x) != str(value), inherit_message = inherit_message2)
def is_nonzero(self, col, inherit_message = ""):
utils.warn("Deprecated. Use is_nonzero_float() instead")
inherit_message2 = inherit_message + ": is_nonzero" if (len(inherit_message) > 0) else "is_nonzero"
return self.is_nonzero_float(col, inherit_message = inherit_message2)
def is_nonzero_int(self, col, inherit_message = ""):
inherit_message2 = inherit_message + ": is_nonzero_int" if (len(inherit_message) > 0) else "is_nonzero_int"
return self.filter([col], lambda x: int(x) != 0, inherit_message = inherit_message2)
def is_nonzero_float(self, col, inherit_message = ""):
inherit_message2 = inherit_message + ": is_nonzero_float" if (len(inherit_message) > 0) else "is_nonzero_float"
return self.filter([col], lambda x: float(x) != 0, inherit_message = inherit_message2)
def lt_str(self, col, value, inherit_message = ""):
inherit_message2 = inherit_message + ": lt_str" if (len(inherit_message) > 0) else "lt_str"
return self.filter([col], lambda x: x < value, inherit_message = inherit_message2)
def le_str(self, col, value, inherit_message = ""):
inherit_message2 = inherit_message + ": le_str" if (len(inherit_message) > 0) else "le_str"
return self.filter([col], lambda x: x <= value, inherit_message = inherit_message2)
def gt_str(self, col, value, inherit_message = ""):
inherit_message2 = inherit_message + ": gt_str" if (len(inherit_message) > 0) else "gt_str"
return self.filter([col], lambda x: x > value, inherit_message = inherit_message2)
def ge_str(self, col, value, inherit_message = ""):
inherit_message2 = inherit_message + ": ge_str" if (len(inherit_message) > 0) else "ge_str"
return self.filter([col], lambda x: x >= value, inherit_message = inherit_message2)
def gt(self, col, value, inherit_message = ""):
utils.warn("Deprecated. Use gt_float() instead")
inherit_message2 = inherit_message + ": gt" if (len(inherit_message) > 0) else "gt"
return self.gt_float(col, value, inherit_message = inherit_message2)
def gt_int(self, col, value, inherit_message = ""):
inherit_message2 = inherit_message + ": gt_int" if (len(inherit_message) > 0) else "gt_int"
return self.filter([col], lambda x: int(float(x)) > int(float(value)), inherit_message = inherit_message2)
def gt_float(self, col, value, inherit_message = ""):
inherit_message2 = inherit_message + ": gt_float" if (len(inherit_message) > 0) else "gt_float"
return self.filter([col], lambda x: float(x) > float(value), inherit_message = inherit_message2)
def ge(self, col, value, inherit_message = ""):
utils.warn("Deprecated. Use ge_float() instead")
inherit_message2 = inherit_message + ": ge" if (len(inherit_message) > 0) else "ge"
return self.ge_float(col, value, inherit_message = inherit_message2)
def ge_int(self, col, value, inherit_message = ""):
inherit_message2 = inherit_message + ": ge_int" if (len(inherit_message) > 0) else "ge_int"
return self.filter([col], lambda x: int(float(x)) >= int(float(value)), inherit_message = inherit_message2)
def ge_float(self, col, value, inherit_message = ""):
inherit_message2 = inherit_message + ": ge_float" if (len(inherit_message) > 0) else "ge_float"
return self.filter([col], lambda x: float(x) >= float(value), inherit_message = inherit_message2)
def lt(self, col, value, inherit_message = ""):
utils.warn("Deprecated. Use lt_float() instead")
inherit_message2 = inherit_message + ": lt" if (len(inherit_message) > 0) else "lt"
return self.lt_float(col, value, inherit_message = inherit_message2)
def lt_int(self, col, value, inherit_message = ""):
inherit_message2 = inherit_message + ": lt_int" if (len(inherit_message) > 0) else "lt_int"
return self.filter([col], lambda x: int(float(x)) < int(float(value)), inherit_message = inherit_message2)
def lt_float(self, col, value, inherit_message = ""):
inherit_message2 = inherit_message + ": lt_float" if (len(inherit_message) > 0) else "lt_float"
return self.filter([col], lambda x: float(x) < float(value), inherit_message = inherit_message2)
def le(self, col, value, inherit_message = ""):
utils.warn("Deprecated. Use le_float() instead")
inherit_message2 = inherit_message + ": le" if (len(inherit_message) > 0) else "le"
return self.filter([col], lambda x: float(x) <= float(value), inherit_message = inherit_message2)
def le_int(self, col, value, inherit_message = ""):
inherit_message2 = inherit_message + ": le_int" if (len(inherit_message) > 0) else "le_int"
return self.filter([col], lambda x: int(float(x)) <= int(float(value)), inherit_message = inherit_message2)
def le_float(self, col, value, inherit_message = ""):
inherit_message2 = inherit_message + ": le_float" if (len(inherit_message) > 0) else "le_float"
return self.filter([col], lambda x: float(x) <= float(value), inherit_message = inherit_message2)
def startswith(self, col, prefix, inherit_message = ""):
inherit_message2 = inherit_message + ": startswith" if (len(inherit_message) > 0) else "startswith"
return self.filter([col], lambda x: x.startswith(prefix), inherit_message = inherit_message2)
def not_startswith(self, col, prefix, inherit_message = ""):
inherit_message2 = inherit_message + ": not_startswith" if (len(inherit_message) > 0) else "not_startswith"
return self.exclude_filter([col], lambda x: x.startswith(prefix), inherit_message = inherit_message2)
def endswith(self, col, suffix, inherit_message = ""):
inherit_message2 = inherit_message + ": endswith" if (len(inherit_message) > 0) else "endswith"
return self.filter([col], lambda x: x.endswith(suffix), inherit_message = inherit_message2)
def not_endswith(self, col, suffix, inherit_message = ""):
inherit_message2 = inherit_message + ": not_endswith" if (len(inherit_message) > 0) else "not_endswith"
return self.exclude_filter([col], lambda x: x.endswith(suffix), inherit_message = inherit_message2)
def replace_str_inline(self, cols, old_str, new_str, inherit_message = ""):
inherit_message2 = inherit_message + ": replace_str_inline" if (len(inherit_message) > 0) else "replace_str_inline"
return self.transform_inline(cols, lambda x: x.replace(old_str, new_str), inherit_message = inherit_message2)
def group_count(self, cols, prefix = "group", collapse = True, precision = 6, inherit_message = ""):
# find the matching cols and indexes
cols = self.__get_matching_cols__(cols)
# define new columns
new_count_col = prefix + ":count"
new_ratio_col = prefix + ":ratio"
# validation and suggestion
if (new_count_col in cols or new_ratio_col in cols):
raise Exception("Use a different prefix than: {}".format(prefix))
# call aggregate with collapse=False
inherit_message2 = inherit_message + ":group_count" if (inherit_message != "") else "group_count"
return self.aggregate(cols, [cols[0]], [len], collapse = collapse, inherit_message = inherit_message2) \
.rename(cols[0] + ":len", new_count_col) \
.transform([new_count_col], lambda x: str(int(x) / len(self.data)), new_ratio_col, inherit_message = inherit_message2) \
.reverse_sort(new_count_col) \
.apply_precision(new_ratio_col, precision, inherit_message = inherit_message2)
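    # Example (sketch, illustrative column names): group_count appends
    # "<prefix>:count" and "<prefix>:ratio" for each distinct value combination of
    # the grouping columns, sorted by count in descending order.
    #
    #   t.group_count("city", prefix = "city_stats")
    #   # adds columns: city_stats:count, city_stats:ratio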
def ratio(self, col1, col2, new_col, default = 0.0, precision = 6, inherit_message = ""):
return self \
.transform([col1, col2], lambda x, y: float(x) / float(y) if (float(y) != 0) else default, new_col) \
.apply_precision(new_col, precision, inherit_message = inherit_message)
    def ratio_const(self, col, denominator, new_col, default = 0.0, precision = 6, inherit_message = ""):
return self \
.transform([col], lambda x: float(x) / float(denominator) if (float(denominator) != 0) else default, new_col) \
.apply_precision(new_col, precision, inherit_message = inherit_message)
def apply_precision(self, cols, precision, inherit_message = ""):
inherit_message2 = inherit_message + ": apply_precision" if (len(inherit_message) > 0) else "apply_precision"
return self.transform_inline(cols, lambda x: ("{:." + str(precision) + "f}").format(float(x)), inherit_message = inherit_message2)
# TODO: use skip_rows for better name
def skip(self, count):
return TSV(self.header, self.data[count:])
def skip_rows(self, count):
return self.skip(count)
def last(self, count):
if (count > len(self.data)):
count = len(self.data)
return TSV(self.header, self.data[-count:])
def take(self, count):
# return result
if (count > len(self.data)):
count = len(self.data)
return TSV(self.header, self.data[0:count])
def distinct(self):
new_data = []
key_map = {}
for line in self.data:
if (line not in key_map.keys()):
key_map[line] = 1
new_data.append(line)
return TSV(self.header, new_data)
# TODO: use drop_cols instead coz of better name
def drop(self, col_or_cols, inherit_message = ""):
# get matching column and indexes
matching_cols = self.__get_matching_cols__(col_or_cols)
# find the columns that dont match
non_matching_cols = []
for h in self.header_fields:
if (h not in matching_cols):
non_matching_cols.append(h)
# return
inherit_message2 = inherit_message + ": drop" if (len(inherit_message) > 0) else "drop"
return self.select(non_matching_cols, inherit_message = inherit_message2)
def drop_cols(self, col_or_cols, inherit_message = ""):
return self.drop(col_or_cols, inherit_message)
def drop_if_exists(self, col_or_cols, inherit_message = ""):
# validation
if (col_or_cols is None or len(col_or_cols) == 0):
return self
# convert to array form
if (isinstance(col_or_cols, str)):
col_or_cols = [col_or_cols]
# debug
inherit_message2 = inherit_message + ": drop_if_exists" if (inherit_message != "") else "drop_if_exists"
# iterate through each element and call drop
result = self
for c in col_or_cols:
try:
cols = result.__get_matching_cols__(c)
result = result.drop(cols, inherit_message = inherit_message2)
except:
# ignore
utils.debug("Column (pattern) not found or already deleted during batch deletion: {}".format(c))
# return
return result
# TODO: the select_cols is not implemented properly
def window_aggregate(self, win_col, agg_cols, agg_funcs, winsize, select_cols = None, sliding = False, collapse = True, suffix = "", precision = 2, inherit_message = ""):
# get the matching cols
if (select_cols is None):
select_cols = []
select_cols = self.__get_matching_cols__(select_cols)
# do validation on window column. All values should be unique
if (len(self.col_as_array(win_col)) != len(self.col_as_array_uniq(win_col))):
utils.warn("The windowing column has non unique values: total: {}, uniq: {}. The results may not be correct.".format(len(self.col_as_array(win_col)), len(self.col_as_array_uniq(win_col))))
# this takes unique values for agg column, split them into windows and then run the loop
win_col_values = sorted(list(set(self.col_as_array(win_col))))
# store the number of values
num_win_col_values = len(win_col_values)
# find number of windows
num_windows = int(math.ceil(1.0 * num_win_col_values / winsize)) if (sliding == False) else (num_win_col_values - (winsize - 1))
# map to the window index
win_mapping = {}
win_names_mapping = {}
# assign index
for i in range(num_win_col_values):
win_col_val = win_col_values[i]
win_indexes = []
if (sliding == False):
win_index = int(i / winsize)
win_indexes.append(win_index)
else:
for i1 in range(winsize):
win_index = i - (winsize - 1) + i1
if (win_index >= 0 and (num_win_col_values - win_index) >= winsize):
win_indexes.append(win_index)
# assign the mapping indexes
win_mapping[win_col_val] = win_indexes
# assign window column range
for win_index in range(num_windows):
if (sliding == True):
win_names_mapping[win_index] = (win_col_values[win_index], win_col_values[win_index + winsize - 1])
else:
start_index = win_index * winsize
end_index = int(min(num_win_col_values - 1, win_index * winsize + winsize - 1))
win_names_mapping[win_index] = (win_col_values[start_index], win_col_values[end_index])
# transform and normalize the value of win_col
suffix2 = suffix if (suffix != "") else "window_aggregate"
new_win_col = win_col + ":" + suffix2
new_header = self.header + "\t" + new_win_col
new_data = []
# iterate over data
counter = 0
for line in self.data:
# report progress
counter = counter + 1
utils.report_progress("window_aggregate: [1/1] calling function", inherit_message, counter, len(self.data))
# parse data
fields = line.split("\t")
win_value = fields[self.header_map[win_col]]
win_indexes = win_mapping[win_value]
# explode for multiple windows
for win_index in win_indexes:
(namex, namey) = win_names_mapping[win_index]
namexy = namex + " - " + namey
new_data.append(line + "\t" + str(namexy))
# return
if (collapse == True):
cols2 = select_cols
cols2.append(win_col)
return TSV(new_header, new_data) \
.drop(win_col) \
.rename(new_win_col, win_col) \
.aggregate(cols2, agg_cols, agg_funcs, collapse)
else:
cols2 = select_cols
cols2.append(new_win_col)
return TSV(new_header, new_data) \
.aggregate(cols2, agg_cols, agg_funcs, collapse, precision)
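    # Example (sketch, illustrative column names): aggregate a numeric column over
    # fixed (non-sliding) windows of 7 unique values of the windowing column. The
    # windowing column is expected to have unique, sortable values.
    #
    #   t.window_aggregate("date", ["price"], [sum], winsize = 7)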
# The signature for agg_func is func(list_of_maps). Each map will get the agg_cols
def group_by_key(self, grouping_cols, agg_cols, agg_func, suffix = "", collapse = True, inherit_message = ""):
# resolve grouping and agg_cols
grouping_cols = self.__get_matching_cols__(grouping_cols)
agg_cols = self.__get_matching_cols__(agg_cols)
# check for validity
if (len(grouping_cols) == 0):
raise Exception("No input columns:", grouping_cols)
# validate grouping cols
for c in grouping_cols:
if (c not in self.header_map.keys()):
raise Exception("grouping col not found:", c, ", columns:", self.header_fields)
# validate agg cols
for c in agg_cols:
if (c not in self.header_map.keys()):
raise Exception("agg col not found:", c, ", columns:", self.header_fields)
# group all the values in the key
grouped = {}
counter = 0
for line in self.data:
# report progress
counter = counter + 1
utils.report_progress("group_by_key: [1/3] grouping: progress", inherit_message, counter, len(self.data))
# parse data
fields = line.split("\t")
# create grouping key
keys = []
values_map = {}
for g in grouping_cols:
keys.append(fields[self.header_map[g]])
for a in agg_cols:
values_map[a] = fields[self.header_map[a]]
keys_str = "\t".join(keys)
if (keys_str not in grouped.keys()):
grouped[keys_str] = []
grouped[keys_str].append(values_map)
# apply the agg func
grouped_agg = {}
counter = 0
for k, vs in grouped.items():
# report progress
counter = counter + 1
utils.report_progress("group_by_key: [2/3] grouping func: progress", inherit_message, counter, len(grouped))
# get fields
vs_map = agg_func(vs)
grouped_agg[k] = vs_map
# determine the set of keys in the aggregation function output
agg_out_keys = {}
# check for special condition for empty data
if (len(grouped) > 0):
for k, vs in grouped_agg.items():
for k2 in vs.keys():
agg_out_keys[k2] = 1
else:
dummy_response = agg_func([])
for k in dummy_response.keys():
agg_out_keys[k] = 1
# validate that none of the agg_func output keys are conflicting with the original tsvs. TODO
utils.print_code_todo_warning("Removing this condition for checking of duplicate names. They are already given a suffix so there is no clash.")
for k in agg_out_keys.keys():
if (k in self.header_map.keys()):
utils.print_code_todo_warning("TODO: Old check: Agg func can not output keys that have the same name as original columns: {}, {}".format(k, str(self.header_fields)))
# create an ordered list of agg output keys
new_cols = sorted(list(agg_out_keys.keys()))
new_cols_names = []
name_suffix = suffix if (suffix != "") else get_func_name(agg_func)
for nc in new_cols:
new_cols_names.append(nc + ":" + name_suffix)
# check for collapse flag and add the agg func value
result = {}
# create header
new_header = None
if (collapse == True):
new_header = "\t".join(utils.merge_arrays([grouping_cols, new_cols_names]))
else:
new_header = "\t".join(utils.merge_arrays([self.header_fields, new_cols_names]))
# create data
new_data = []
counter = 0
for line in self.data:
# report progress
counter = counter + 1
utils.report_progress("group_by_key: [3/3] generating data", inherit_message, counter, len(self.data))
# process data
fields = line.split("\t")
# create grouping key
keys = []
for g in grouping_cols:
keys.append(fields[self.header_map[g]])
keys_str = "\t".join(keys)
# check output data
if (len(grouped_agg[keys_str]) != len(new_cols)):
raise Exception("Error in data and number of output cols:", grouped_agg[keys_str], new_cols)
# get the new values in the correct order
new_col_values = []
vs = grouped_agg[keys_str]
for c in new_cols:
new_col_values.append(str(vs[c]))
# generate the string for new values
v = "\t".join(new_col_values)
# add result according to collapse flag
if (collapse == True):
if (keys_str not in result.keys()):
# append to the output data
new_line = keys_str + "\t" + str(v)
new_data.append(new_line)
result[keys_str] = 1
else:
new_line = line + "\t" + str(v)
new_data.append(new_line)
return TSV(new_header, new_data)
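    # Example (sketch, illustrative column names): the aggregation function receives
    # a list of maps (one per row in the group, containing only the agg_cols) and
    # must return a map of output values; output columns get a ":<suffix>" suffix.
    #
    #   def __total_score__(vs):
    #       return {"total": sum([float(v["score"]) for v in vs])}
    #
    #   t.group_by_key(["city"], ["score"], __total_score__, suffix = "agg")
    #   # adds column: total:agg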
# FIXME
def arg_min(self, grouping_cols, argcols, valcols, suffix = "arg_min", topk = 1, sep = "|", collapse = True):
utils.warn("arg_min is not implemented correctly. Too complicated")
return self.__arg_min_or_max_common__(grouping_cols, argcols, valcols, suffix, topk, sep, -1, collapse = collapse)
def arg_max(self, grouping_cols, argcols, valcols, suffix = "arg_max", topk = 1, sep = "|", collapse = True):
utils.warn("arg_max is not implemented correctly. Too complicated")
return self.__arg_min_or_max_common__(grouping_cols, argcols, valcols, suffix, topk, sep, 1, collapse = collapse)
# grouping_cols are for grouping
# argcols which are returned where valcols values are max or min
# suffix is added to both arg and val. arg are suffixed as :arg, values are suffixed as val1, val2 upto topk
def __arg_min_or_max_common__(self, grouping_cols, argcols, valcols, suffix, topk, sep, sign, collapse = False):
grouping_cols = self.__get_matching_cols__(grouping_cols)
argcols = self.__get_matching_cols__(argcols)
valcols = self.__get_matching_cols__(valcols)
def __arg_max_grouping_func__(vs):
# initialize
max_keys = []
for i in range(len(argcols)):
max_keys.append([])
max_values = []
for i in range(len(valcols)):
max_values.append(sign * float('-inf'))
# iterate over all values
for mp in vs:
# read keys
keys = []
for i in range(len(argcols)):
keys.append(mp[argcols[i]])
# read values
values = []
for i in range(len(valcols)):
values.append(sign * float(mp[valcols[i]]))
# check if a new max has been found
found = False
for i in range(len(values)):
if (max_values[i] < values[i]):
# found a new max
for j in range(len(keys)):
max_keys[j] = [str(keys[j])]
for j in range(len(values)):
max_values[j] = values[j]
found = True
break
elif (max_values[i] > values[i]):
found = True
break
# check for value of found. If it is still true, then it means multiple matches
if (found == False):
for i in range(len(keys)):
max_keys[i].append(str(keys[i]))
# check for topk
result = {}
for i in range(len(argcols)):
result[argcols[i] + ":arg"] = sep.join(max_keys[i][0:topk])
for i in range(len(valcols)):
result[valcols[i] + ":val" + str(i+1)] = str(max_values[i])
# return
return result
# combine both columns
combined_cols = []
for k in argcols:
combined_cols.append(k)
for k in valcols:
combined_cols.append(k)
# remaining validation done by the group_by_key
return self.group_by_key(grouping_cols, combined_cols, __arg_max_grouping_func__, suffix = suffix, collapse = collapse)
# TODO: this use_string_datatype is temporary and needs to be replaced with better design.
def aggregate(self, grouping_col_or_cols, agg_cols, agg_funcs, collapse = True, precision = 6, use_rolling = False, use_string_datatype = False, inherit_message = ""):
# get matching columns
grouping_cols = self.__get_matching_cols__(grouping_col_or_cols)
# define rolling functions
rolling_agg_funcs_map = {"sum": get_rolling_func_update_sum, "min": get_rolling_func_update_min, "max": get_rolling_func_update_max, "mean": get_rolling_func_update_mean,
"len": get_rolling_func_update_len}
# validation on number of grouping cols
if (len(grouping_cols) == 0 or len(agg_cols) == 0):
raise Exception("No input columns:", grouping_cols, agg_cols, suffix)
# validation on number of agg funcs
if (len(agg_cols) != len(agg_funcs)):
raise Exception("Aggregate functions are not of correct size")
# validation
indexes = self.__get_col_indexes__(grouping_cols)
# check for column to be aggregated
new_cols = []
for i in range(len(agg_cols)):
agg_col = agg_cols[i]
if (agg_col not in self.header_map.keys()):
raise Exception("Column not found: ", str(agg_col) + ", header:", str(self.header_fields))
new_cols.append(agg_col + ":" + get_func_name(agg_funcs[i]))
# take the indexes
agg_col_indexes = []
rolling_agg_col_indexes_map = {}
rolling_agg_funcs = []
# for each agg col, add the index
for i in range(len(agg_cols)):
agg_col = agg_cols[i]
agg_index = self.header_map[agg_col]
agg_col_indexes.append(agg_index)
# prepare map of indexes of rolling_agg functions
if (get_func_name(agg_funcs[i]) in rolling_agg_funcs_map.keys()):
rolling_agg_col_indexes_map[i] = 1
rolling_agg_funcs.append(rolling_agg_funcs_map[get_func_name(agg_funcs[i])])
# create a map to store array of values
value_map_arr = [{} for i in range(len(agg_col_indexes))]
# TODO: This rolling aggregation needs to be removed
# rolling_value_map_arr = [{} for i in range(len(agg_col_indexes))]
# iterate over the data
counter = 0
for line in self.data:
# report progress
counter = counter + 1
utils.report_progress("aggregate: [1/2] building groups", inherit_message, counter, len(self.data))
# process data
fields = line.split("\t")
# new col values
col_values = []
for i in indexes:
col_values.append(fields[i])
cols_key = "\t".join(col_values)
# for each possible aggregation, do this
for j in range(len(agg_col_indexes)):
if (cols_key not in value_map_arr[j].keys()):
value_map_arr[j][cols_key] = []
# check for rolling functions
if (use_rolling and j in rolling_agg_col_indexes_map.keys()):
if (len(value_map_arr[j][cols_key]) == 0):
value_map_arr[j][cols_key] = get_rolling_func_init(get_func_name(agg_funcs[j]))
#get_rolling_func_update(value_map_arr[j][cols_key], float(fields[agg_col_indexes[j]]), get_func_name(agg_funcs[j]))
rolling_agg_funcs[j](value_map_arr[j][cols_key], float(fields[agg_col_indexes[j]]))
else:
# TODO: this is a hack on datatype
if (use_string_datatype == False):
try:
value_map_arr[j][cols_key].append(float(fields[agg_col_indexes[j]]))
except ValueError:
value_map_arr[j][cols_key].append(fields[agg_col_indexes[j]])
else:
value_map_arr[j][cols_key].append(str(fields[agg_col_indexes[j]]))
# compute the aggregation
value_func_map_arr = [{} for i in range(len(agg_col_indexes))]
# for each possible index, do aggregation
for j in range(len(agg_col_indexes)):
for k, vs in value_map_arr[j].items():
# check for rolling functions
if (use_rolling and j in rolling_agg_col_indexes_map.keys()):
value_func_map_arr[j][k] = get_rolling_func_closing(vs, get_func_name(agg_funcs[j]))
else:
value_func_map_arr[j][k] = agg_funcs[j](vs)
# create new header and data
new_header = "\t".join(utils.merge_arrays([self.header_fields, new_cols]))
new_data = []
# for each output line, attach the new aggregate value
counter = 0
cols_key_map = {}
for line in self.data:
# report progress
counter = counter + 1
utils.report_progress("aggregate: [2/2] calling function", inherit_message, counter, len(self.data))
# data processing
fields = line.split("\t")
col_values = []
for i in indexes:
col_values.append(fields[i])
cols_key = "\t".join(col_values)
# append the aggregated values
agg_values = []
for j in range(len(agg_col_indexes)):
agg_values.append(str(value_func_map_arr[j][cols_key]))
# check for different flags
if (collapse == False or cols_key not in cols_key_map.keys()):
new_line = "\t".join(utils.merge_arrays([fields, agg_values]))
new_data.append(new_line)
cols_key_map[cols_key] = 1
# return
if (collapse == True):
# uniq cols
uniq_cols = []
for col in grouping_cols:
uniq_cols.append(col)
for new_col in new_cols:
uniq_cols.append(new_col)
return TSV(new_header, new_data).to_numeric(new_cols, precision, inherit_message = inherit_message).select(uniq_cols)
else:
return TSV(new_header, new_data).to_numeric(new_cols, precision, inherit_message = inherit_message)
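    # Example (sketch, illustrative column names): aggregate takes parallel lists of
    # columns and functions; each output column is named "<col>:<func name>".
    #
    #   t.aggregate(["city"], ["score", "score"], [min, max])
    #   # with collapse = True, returns columns: city, score:min, score:max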
def filter(self, cols, func, include_cond = True, inherit_message = ""):
# TODO: Filter should not use regex. Need to add warning as the order of fields matter
cols = self.__get_matching_cols__(cols)
indexes = self.__get_col_indexes__(cols)
# count the number of columns
num_cols = len(cols)
# new data
new_data = []
counter = 0
for line in self.data:
counter = counter + 1
utils.report_progress("filter: [1/1] calling function", inherit_message, counter, len(self.data))
fields = line.split("\t")
col_values = []
for index in indexes:
col_values.append(fields[index])
if (num_cols == 1):
result = func(col_values[0])
elif (num_cols == 2):
result = func(col_values[0], col_values[1])
elif (num_cols == 3):
result = func(col_values[0], col_values[1], col_values[2])
elif (num_cols == 4):
result = func(col_values[0], col_values[1], col_values[2], col_values[3])
elif (num_cols == 5):
result = func(col_values[0], col_values[1], col_values[2], col_values[3], col_values[4])
elif (num_cols == 6):
result = func(col_values[0], col_values[1], col_values[2], col_values[3], col_values[4], col_values[5])
elif (num_cols == 7):
result = func(col_values[0], col_values[1], col_values[2], col_values[3], col_values[4], col_values[5], col_values[6])
elif (num_cols == 8):
result = func(col_values[0], col_values[1], col_values[2], col_values[3], col_values[4], col_values[5], col_values[6], col_values[7])
elif (num_cols == 9):
result = func(col_values[0], col_values[1], col_values[2], col_values[3], col_values[4], col_values[5], col_values[6], col_values[7], col_values[8])
elif (num_cols == 10):
result = func(col_values[0], col_values[1], col_values[2], col_values[3], col_values[4], col_values[5], col_values[6], col_values[7], col_values[8], col_values[9])
else:
raise Exception("Number of columns is not supported beyond 10" + str(cols))
if (result == include_cond):
new_data.append(line)
return TSV(self.header, new_data)
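    # Example (sketch, illustrative column names): filter keeps the rows for which
    # the function returns True on the selected column values (all values are
    # passed as strings).
    #
    #   t.filter(["score"], lambda x: float(x) >= 10)
    #   t.filter(["city", "score"], lambda c, s: c == "nyc" and float(s) >= 10)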
def exclude_filter(self, cols, func, inherit_message = ""):
inherit_message2 = inherit_message + ": exclude_filter" if (inherit_message != "") else "exclude_filter"
return self.filter(cols, func, include_cond = False, inherit_message = inherit_message2)
def transform(self, cols, func, new_col_or_cols, use_array_notation = False, inherit_message = ""):
# resolve to matching_cols
matching_cols = self.__get_matching_cols__(cols)
# find if the new cols is a single value or array
if (isinstance(new_col_or_cols, str)):
new_cols = [new_col_or_cols]
else:
new_cols = new_col_or_cols
# number of new_cols
num_new_cols = len(new_cols)
# validation
if ((utils.is_array_of_string_values(cols) == False and len(matching_cols) != 1) or (utils.is_array_of_string_values(cols) == True and len(matching_cols) != len(cols))):
raise Exception("transform api doesnt support regex style cols array as the order of columns matter:", cols, matching_cols)
# validation
for col in matching_cols:
if (col not in self.header_map.keys()):
raise Exception("Column not found:", str(col), str(self.header_fields))
# new col validation
for new_col in new_cols:
if (new_col in self.header_fields):
raise Exception("New column already exists:", new_col, str(self.header_fields))
# get the indexes
num_cols = len(matching_cols)
indexes = []
for col in matching_cols:
indexes.append(self.header_map[col])
# create new header and data
new_header = "\t".join(utils.merge_arrays([self.header_fields, new_cols]))
new_data = []
counter = 0
# iterate over data
for line in self.data:
counter = counter + 1
utils.report_progress("transform: [1/1] calling function", inherit_message, counter, len(self.data))
# get fields
fields = line.split("\t")
col_values = []
# get the fields in cols
for index in indexes:
col_values.append(fields[index])
# check which notation is used to do the function call
if (use_array_notation == False):
if (num_cols == 1):
                    # TODO: this if condition is not able to do error check when the number of output columns doesn't match the number of input cols
result = func(col_values[0])
elif (num_cols == 2):
result = func(col_values[0], col_values[1])
elif (num_cols == 3):
result = func(col_values[0], col_values[1], col_values[2])
elif (num_cols == 4):
result = func(col_values[0], col_values[1], col_values[2], col_values[3])
elif (num_cols == 5):
result = func(col_values[0], col_values[1], col_values[2], col_values[3], col_values[4])
elif (num_cols == 6):
result = func(col_values[0], col_values[1], col_values[2], col_values[3], col_values[4], col_values[5])
elif (num_cols == 7):
result = func(col_values[0], col_values[1], col_values[2], col_values[3], col_values[4], col_values[5], col_values[6])
elif (num_cols == 8):
result = func(col_values[0], col_values[1], col_values[2], col_values[3], col_values[4], col_values[5], col_values[6], col_values[7])
elif (num_cols == 9):
result = func(col_values[0], col_values[1], col_values[2], col_values[3], col_values[4], col_values[5], col_values[6], col_values[7], col_values[8])
elif (num_cols == 10):
result = func(col_values[0], col_values[1], col_values[2], col_values[3], col_values[4], col_values[5], col_values[6], col_values[7], col_values[8], col_values[9])
elif (num_cols == 11):
result = func(col_values[0], col_values[1], col_values[2], col_values[3], col_values[4], col_values[5], col_values[6], col_values[7], col_values[8], col_values[9], col_values[10])
elif (num_cols == 12):
result = func(col_values[0], col_values[1], col_values[2], col_values[3], col_values[4], col_values[5], col_values[6], col_values[7], col_values[8], col_values[9], col_values[10], col_values[11])
elif (num_cols == 13):
result = func(col_values[0], col_values[1], col_values[2], col_values[3], col_values[4], col_values[5], col_values[6], col_values[7], col_values[8], col_values[9], col_values[10], col_values[11], col_values[12])
elif (num_cols == 14):
result = func(col_values[0], col_values[1], col_values[2], col_values[3], col_values[4], col_values[5], col_values[6], col_values[7], col_values[8], col_values[9], col_values[10], col_values[11], col_values[12], col_values[13])
elif (num_cols == 15):
                    result = func(col_values[0], col_values[1], col_values[2], col_values[3], col_values[4], col_values[5], col_values[6], col_values[7], col_values[8], col_values[9], col_values[10], col_values[11], col_values[12], col_values[13], col_values[14])
else:
raise Exception("Number of columns is not supported beyond 15. Probably try to use use_array_notation approach:" + str(cols))
else:
result = func(col_values)
# create new line and append to data. Do validation
result_arr = []
if (use_array_notation == False):
if (num_new_cols == 1):
if (isinstance(result, list)):
result_arr.append(str(result[0]))
else:
result_arr.append(str(result))
if (num_new_cols >= 2):
result_arr.append(str(result[0]))
result_arr.append(str(result[1]))
if (num_new_cols >= 3):
result_arr.append(str(result[2]))
if (num_new_cols >= 4):
result_arr.append(str(result[3]))
if (num_new_cols >= 5):
result_arr.append(str(result[4]))
if (num_new_cols >= 6):
result_arr.append(str(result[5]))
if (num_new_cols >= 7):
result_arr.append(str(result[6]))
if (num_new_cols >= 8):
result_arr.append(str(result[7]))
if (num_new_cols >= 9):
result_arr.append(str(result[8]))
if (num_new_cols >= 10):
result_arr.append(str(result[9]))
if (num_new_cols >= 11):
raise Exception("Number of new columns is not supported beyond 10. Probably try to use use_array_notation approach:" + str(new_cols))
else:
# check how many columns to expect.
if (num_new_cols == 1):
if (isinstance(result, str)):
result_arr.append(str(result))
elif (isinstance(result, list)):
result_arr.append(str(result[0]))
else:
result_arr.append(str(result))
else:
if (len(result) != num_new_cols):
raise Exception("Invalid number of fields in the result array. Expecting: {}, Got: {}, result: {}, new_cols: {}".format(num_new_cols, len(result), result, new_cols))
for r in result:
result_arr.append(r)
# create new line
new_line = "\t".join(utils.merge_arrays([fields, result_arr]))
new_data.append(new_line)
# return
return TSV(new_header, new_data)
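    # Example (sketch, illustrative column names): transform reads the input columns
    # and appends one or more new columns built from the function result.
    #
    #   t.transform(["score"], lambda x: float(x) * 2, "score_doubled")
    #   t.transform(["math", "science"], lambda x, y: (float(x) + float(y), abs(float(x) - float(y))),
    #       ["total", "gap"])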
def transform_inline(self, cols, func, inherit_message = ""):
# find the matching cols and indexes
matching_cols = self.__get_matching_cols__(cols)
indexes = self.__get_col_indexes__(matching_cols)
# print which columns are going to be transformed
if (len(matching_cols) != len(cols) and len(matching_cols) != 1):
utils.debug("transform_inline: list of columns that will be transformed: {}".format(str(matching_cols)))
# create new data
new_data = []
counter = 0
for line in self.data:
counter = counter + 1
utils.report_progress("transform_inline: [1/1] calling function", inherit_message, counter, len(self.data))
fields = line.split("\t")
new_fields = []
for i in range(len(fields)):
if (i in indexes):
new_fields.append(str(func(fields[i])))
else:
new_fields.append(str(fields[i]))
new_data.append("\t".join(new_fields))
return TSV(self.header, new_data)
def rename(self, col, new_col):
if (col not in self.header_map.keys()):
raise Exception("Column not found:", str(col), str(self.header_fields))
if (new_col in self.header_map.keys()):
raise Exception("New Column already exists:", str(new_col), str(self.header_fields))
index = self.header_map[col]
header_fields2 = []
for h in self.header_fields:
if (h == col):
header_fields2.append(new_col)
else:
header_fields2.append(h)
new_header = "\t".join(header_fields2)
return TSV(new_header, self.data)
def get_header(self):
return self.header
def get_data(self):
return self.data
def get_header_map(self):
return self.header_map
def num_rows(self):
return len(self.data)
def num_cols(self):
return len(self.header_map)
def get_size_in_bytes(self):
utils.warn("Please use size_in_bytes() instead")
return self.size_in_bytes()
def size_in_bytes(self):
total = len(self.header)
for line in self.data:
total = total + len(line)
return total
def size_in_mb(self):
return int(self.size_in_bytes() / 1e6)
def size_in_gb(self):
return int(self.size_in_bytes() / 1e9)
def get_header_fields(self):
return self.header_fields
def get_columns(self):
return self.get_header_fields()
def columns(self):
utils.warn("Deprecated. Use get_columns() instead")
return self.get_columns()
def get_column_index(self, col):
# validation
if (col not in self.get_columns()):
raise Exception("Column not found: {}, {}".format(col, self.get_columns()))
# get index
header_map = self.get_header_map()
return header_map[col]
def export_to_maps(self):
utils.warn("Please use to_maps()")
return self.to_maps()
def to_maps(self):
mps = []
for line in self.data:
fields = line.split("\t")
mp = {}
for i in range(len(self.header_fields)):
mp[self.header_fields[i]] = str(fields[i])
mps.append(mp)
return mps
def __convert_to_numeric__(self, x, precision = 6):
try:
if (int(float(x)) == float(x)):
return str(int(float(x)))
else:
precision_str = "{:." + str(precision) + "f}"
return precision_str.format(float(x))
except ValueError:
return str(x)
# TODO
def to_numeric(self, cols, precision = 6, inherit_message = ""):
inherit_message2 = inherit_message + ": to_numeric" if (len(inherit_message) > 0) else "to_numeric"
return self.transform_inline(cols, lambda x: self.__convert_to_numeric__(x, precision), inherit_message = inherit_message2)
def add_seq_num(self, new_col, inherit_message = ""):
# validation
if (new_col in self.header_map.keys()):
raise Exception("Output column name already exists:", new_col, self.header_fields)
# create new header
new_header = new_col + "\t" + self.header
# create new data
new_data = []
counter = 0
for line in self.data:
counter = counter + 1
utils.report_progress("add_seq_num: [1/1] adding new column", inherit_message, counter, len(self.data))
new_data.append(str(counter) + "\t" + line)
# return
return TSV(new_header, new_data)
def show_transpose(self, n = 1, title = None):
# validation and doing min
if (self.num_rows() < n):
n = self.num_rows()
# max width of screen to determine per column width
max_width = 180
max_col_width = int(max_width / (n + 1))
return self.transpose(n).show(n = self.num_cols(), max_col_width = max_col_width, title = title)
def show(self, n = 100, max_col_width = 40, title = None):
self.take(n).__show_topn__(max_col_width, title)
# return the original tsv
return self
def __show_topn__(self, max_col_width, title):
spaces = " ".join([""]*max_col_width)
# gather data about width of columns
col_widths = {}
is_numeric_type_map = {}
# determine which columns are numeric type
for k in self.header_map.keys():
col_widths[k] = min(len(k), max_col_width)
is_numeric_type_map[k] = True
# determine width
for line in self.data:
fields = line.split("\t")
for i in range(len(fields)):
k = self.header_index_map[i]
value = fields[i]
col_widths[k] = min(max_col_width, max(col_widths[k], len(str(value))))
try:
vfloat = float(value)
except ValueError:
is_numeric_type_map[k] = False
# combine header and lines
all_data = [self.header]
for line in self.data:
all_data.append(line)
# print label
if (title is not None):
print("=============================================================================================================================================")
print(title)
print("=============================================================================================================================================")
# iterate and print. +1 for header
for i in range(len(all_data)):
line = all_data[i]
fields = line.split("\t")
row = []
for j in range(len(fields)):
col_width = col_widths[self.header_index_map[j]]
value = str(fields[j])
if (len(value) > col_width):
value = value[0:col_width]
elif (len(value) < col_width):
if (j > 0 and is_numeric_type_map[self.header_index_map[j]] == True):
value = spaces[0:col_width - len(value)] + value
else:
value = value + spaces[0:col_width - len(value)]
row.append(str(value))
print("\t".join(row))
if (title is not None):
print("=============================================================================================================================================")
# return self
return self
def col_as_array(self, col):
if (col not in self.header_map.keys()):
raise Exception("Column not found:", str(col), str(self.header_fields))
index = self.header_map[col]
ret_values = []
for line in self.data:
fields = line.split("\t")
ret_values.append(str(fields[index]))
return ret_values
def col_as_float_array(self, col):
values = self.col_as_array(col)
float_values = [float(v) for v in values]
return float_values
def col_as_int_array(self, col):
values = self.col_as_float_array(col)
numeric_values = [int(v) for v in values]
return numeric_values
def col_as_array_uniq(self, col):
values = self.col_as_array(col)
return list(dict.fromkeys(values))
# this method returns hashmap of key->map[k:v]
# TODO: keys should be changed to single column
def cols_as_map(self, key_cols, value_cols):
utils.warn("This api has changed from prev implementation")
# validation
key_cols = self.__get_matching_cols__(key_cols)
# check for all columns in the value part
value_cols = self.__get_matching_cols__(value_cols)
# create map
mp = {}
for line in self.data:
fields = line.split("\t")
# get the key
keys = []
for key_col in key_cols:
key = fields[self.header_map[key_col]]
keys.append(key)
keys_tuple = self.__expand_to_tuple__(keys)
# check for non duplicate keys
if (keys_tuple in mp.keys()):
raise Exception("keys is not unique:", keys)
values = []
for value_col in value_cols:
value = fields[self.header_map[value_col]]
values.append(str(value))
values_tuple = self.__expand_to_tuple__(values)
# store value in hashmap
mp[keys_tuple] = values_tuple
return mp
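    # Example (sketch, illustrative column names): cols_as_map builds a dict keyed by
    # the key column values (scalar or tuple, via __expand_to_tuple__) mapping to the
    # value column values. Keys must be unique across rows.
    #
    #   mp = t.cols_as_map(["name"], ["score"])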
def __sort_helper__(self, line, indexes, all_numeric):
values = []
fields = line.split("\t")
for i in indexes:
if (all_numeric == True):
values.append(float(fields[i]))
else:
values.append(fields[i])
return tuple(values)
def sort(self, cols = None, reverse = False, reorder = False, all_numeric = None):
# if nothing is specified sort on all columns
if (cols is None):
cols = self.get_header_fields()
# find the matching cols and indexes
matching_cols = self.__get_matching_cols__(cols)
indexes = self.__get_col_indexes__(matching_cols)
# check if all are numeric or not
if (all_numeric is None):
has_alpha = False
for col in matching_cols:
if (utils.is_float_col(self, col) == False):
has_alpha = True
break
if (has_alpha == True):
all_numeric = False
else:
all_numeric = True
# sort
new_data = sorted(self.data, key = lambda line: self.__sort_helper__(line, indexes, all_numeric = all_numeric), reverse = reverse)
# check if need to reorder the fields
if (reorder == True):
return TSV(self.header, new_data).reorder(matching_cols, inherit_message = "sort")
else:
return TSV(self.header, new_data)
def reverse_sort(self, cols = None, reorder = False, all_numeric = None):
return self.sort(cols = cols, reverse = True, reorder = reorder, all_numeric = all_numeric)
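    # Example (sketch, illustrative column names): sort orders rows by the given
    # columns, auto-detecting whether all of them are numeric unless all_numeric
    # is passed explicitly.
    #
    #   t.sort("score")
    #   t.reverse_sort(["city", "score"])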
# reorder the specific columns
def reorder(self, cols, use_existing_order = True, inherit_message = ""):
# get matching column and indexes
matching_cols = self.__get_matching_cols__(cols)
indexes = self.__get_col_indexes__(matching_cols)
# do a full reorder if asked
if (use_existing_order == False):
# get the non matching cols
non_matching_cols = []
for c in self.get_header_fields():
if (c not in matching_cols):
non_matching_cols.append(c)
# all cols
all_cols = []
for c in matching_cols:
                all_cols.append(c)
            for c in non_matching_cols:
                all_cols.append(c)
# return
            return self.select(all_cols).reorder(cols, use_existing_order = True, inherit_message = inherit_message)
# create a map of columns that match the criteria
new_header_fields = []
# append all the matching columns
for h in self.header_fields:
if (h in matching_cols):
new_header_fields.append(h)
# append all the remaining columns
for h in self.header_fields:
if (h not in matching_cols):
new_header_fields.append(h)
# pass on the message
inherit_message2 = inherit_message + ": reorder" if (len(inherit_message) > 0) else "reorder"
return self.select(new_header_fields, inherit_message = inherit_message2)
def reorder_reverse(self, cols, inherit_message = ""):
utils.warn("Please use reverse_reorder instead")
return self.reverse_reorder(cols, inherit_message)
# reorder for pushing the columns to the end
def reverse_reorder(self, cols, inherit_message = ""):
# get matching column and indexes
matching_cols = self.__get_matching_cols__(cols)
# generate the list of cols that should be brought to front
rcols = []
for h in self.header_fields:
if (h not in matching_cols):
rcols.append(h)
# pass on the message
inherit_message2 = inherit_message + ": reorder_reverse" if (len(inherit_message) > 0) else "reorder_reverse"
return self.reorder(rcols, inherit_message = inherit_message2)
def noop(self, *args, **kwargs):
return self
def to_df(self, n = -1):
return self.export_to_df(n)
def export_to_df(self, n = -1):
# find how many rows to select
nrows = len(self.data)
nrows = n if (n > 0 and n < nrows) else nrows
# initialize map
df_map = {}
for h in self.header_fields:
df_map[h] = []
# iterate over data
for line in self.data[0:nrows]:
fields = line.split("\t")
for i in range(len(fields)):
df_map[self.header_index_map[i]].append(str(fields[i]))
# return
        return pd.DataFrame(df_map)
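    # Example (sketch): to_df materializes the TSV into a pandas DataFrame with all
    # values kept as strings; pass n to limit the number of rows exported.
    #
    #   df = t.to_df()
    #   df_head = t.to_df(n = 100)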
from collections import deque
from datetime import datetime
import operator
import re
import numpy as np
import pytest
import pytz
import pandas as pd
from pandas import DataFrame, MultiIndex, Series
import pandas._testing as tm
import pandas.core.common as com
from pandas.core.computation.expressions import _MIN_ELEMENTS, _NUMEXPR_INSTALLED
from pandas.tests.frame.common import _check_mixed_float, _check_mixed_int
# -------------------------------------------------------------------
# Comparisons
class TestFrameComparisons:
# Specifically _not_ flex-comparisons
def test_frame_in_list(self):
# GH#12689 this should raise at the DataFrame level, not blocks
df = pd.DataFrame(np.random.randn(6, 4), columns=list("ABCD"))
msg = "The truth value of a DataFrame is ambiguous"
with pytest.raises(ValueError, match=msg):
df in [None]
def test_comparison_invalid(self):
def check(df, df2):
for (x, y) in [(df, df2), (df2, df)]:
# we expect the result to match Series comparisons for
# == and !=, inequalities should raise
result = x == y
expected = pd.DataFrame(
{col: x[col] == y[col] for col in x.columns},
index=x.index,
columns=x.columns,
)
tm.assert_frame_equal(result, expected)
result = x != y
expected = pd.DataFrame(
{col: x[col] != y[col] for col in x.columns},
index=x.index,
columns=x.columns,
)
tm.assert_frame_equal(result, expected)
msgs = [
r"Invalid comparison between dtype=datetime64\[ns\] and ndarray",
"invalid type promotion",
(
# npdev 1.20.0
r"The DTypes <class 'numpy.dtype\[.*\]'> and "
r"<class 'numpy.dtype\[.*\]'> do not have a common DType."
),
]
msg = "|".join(msgs)
with pytest.raises(TypeError, match=msg):
x >= y
with pytest.raises(TypeError, match=msg):
x > y
with pytest.raises(TypeError, match=msg):
x < y
with pytest.raises(TypeError, match=msg):
x <= y
# GH4968
# invalid date/int comparisons
df = pd.DataFrame(np.random.randint(10, size=(10, 1)), columns=["a"])
df["dates"] = pd.date_range("20010101", periods=len(df))
df2 = df.copy()
df2["dates"] = df["a"]
check(df, df2)
df = pd.DataFrame(np.random.randint(10, size=(10, 2)), columns=["a", "b"])
df2 = pd.DataFrame(
{
"a": pd.date_range("20010101", periods=len(df)),
"b": pd.date_range("20100101", periods=len(df)),
}
)
check(df, df2)
def test_timestamp_compare(self):
# make sure we can compare Timestamps on the right AND left hand side
# GH#4982
df = pd.DataFrame(
{
"dates1": pd.date_range("20010101", periods=10),
"dates2": pd.date_range("20010102", periods=10),
"intcol": np.random.randint(1000000000, size=10),
"floatcol": np.random.randn(10),
"stringcol": list(tm.rands(10)),
}
)
df.loc[np.random.rand(len(df)) > 0.5, "dates2"] = pd.NaT
ops = {"gt": "lt", "lt": "gt", "ge": "le", "le": "ge", "eq": "eq", "ne": "ne"}
for left, right in ops.items():
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# no nats
if left in ["eq", "ne"]:
expected = left_f(df, pd.Timestamp("20010109"))
result = right_f(pd.Timestamp("20010109"), df)
tm.assert_frame_equal(result, expected)
else:
msg = (
"'(<|>)=?' not supported between "
"instances of 'numpy.ndarray' and 'Timestamp'"
)
with pytest.raises(TypeError, match=msg):
left_f(df, pd.Timestamp("20010109"))
with pytest.raises(TypeError, match=msg):
right_f(pd.Timestamp("20010109"), df)
# nats
expected = left_f(df, pd.Timestamp("nat"))
result = right_f(pd.Timestamp("nat"), df)
tm.assert_frame_equal(result, expected)
def test_mixed_comparison(self):
# GH#13128, GH#22163 != datetime64 vs non-dt64 should be False,
# not raise TypeError
# (this appears to be fixed before GH#22163, not sure when)
df = pd.DataFrame([["1989-08-01", 1], ["1989-08-01", 2]])
other = pd.DataFrame([["a", "b"], ["c", "d"]])
result = df == other
assert not result.any().any()
result = df != other
assert result.all().all()
def test_df_boolean_comparison_error(self):
# GH#4576, GH#22880
# comparing DataFrame against list/tuple with len(obj) matching
# len(df.columns) is supported as of GH#22800
df = pd.DataFrame(np.arange(6).reshape((3, 2)))
expected = pd.DataFrame([[False, False], [True, False], [False, False]])
result = df == (2, 2)
tm.assert_frame_equal(result, expected)
result = df == [2, 2]
tm.assert_frame_equal(result, expected)
def test_df_float_none_comparison(self):
df = pd.DataFrame(
np.random.randn(8, 3), index=range(8), columns=["A", "B", "C"]
)
result = df.__eq__(None)
assert not result.any().any()
def test_df_string_comparison(self):
df = pd.DataFrame([{"a": 1, "b": "foo"}, {"a": 2, "b": "bar"}])
mask_a = df.a > 1
tm.assert_frame_equal(df[mask_a], df.loc[1:1, :])
tm.assert_frame_equal(df[-mask_a], df.loc[0:0, :])
mask_b = df.b == "foo"
tm.assert_frame_equal(df[mask_b], df.loc[0:0, :])
tm.assert_frame_equal(df[-mask_b], df.loc[1:1, :])
class TestFrameFlexComparisons:
# TODO: test_bool_flex_frame needs a better name
def test_bool_flex_frame(self):
data = np.random.randn(5, 3)
other_data = np.random.randn(5, 3)
df = pd.DataFrame(data)
other = pd.DataFrame(other_data)
ndim_5 = np.ones(df.shape + (1, 3))
# Unaligned
def _check_unaligned_frame(meth, op, df, other):
part_o = other.loc[3:, 1:].copy()
rs = meth(part_o)
xp = op(df, part_o.reindex(index=df.index, columns=df.columns))
tm.assert_frame_equal(rs, xp)
# DataFrame
assert df.eq(df).values.all()
assert not df.ne(df).values.any()
for op in ["eq", "ne", "gt", "lt", "ge", "le"]:
f = getattr(df, op)
o = getattr(operator, op)
# No NAs
tm.assert_frame_equal(f(other), o(df, other))
_check_unaligned_frame(f, o, df, other)
# ndarray
tm.assert_frame_equal(f(other.values), o(df, other.values))
# scalar
tm.assert_frame_equal(f(0), o(df, 0))
# NAs
msg = "Unable to coerce to Series/DataFrame"
tm.assert_frame_equal(f(np.nan), o(df, np.nan))
with pytest.raises(ValueError, match=msg):
f(ndim_5)
# Series
def _test_seq(df, idx_ser, col_ser):
idx_eq = df.eq(idx_ser, axis=0)
col_eq = df.eq(col_ser)
idx_ne = df.ne(idx_ser, axis=0)
col_ne = df.ne(col_ser)
tm.assert_frame_equal(col_eq, df == pd.Series(col_ser))
tm.assert_frame_equal(col_eq, -col_ne)
tm.assert_frame_equal(idx_eq, -idx_ne)
tm.assert_frame_equal(idx_eq, df.T.eq(idx_ser).T)
tm.assert_frame_equal(col_eq, df.eq(list(col_ser)))
tm.assert_frame_equal(idx_eq, df.eq(pd.Series(idx_ser), axis=0))
tm.assert_frame_equal(idx_eq, df.eq(list(idx_ser), axis=0))
idx_gt = df.gt(idx_ser, axis=0)
col_gt = df.gt(col_ser)
idx_le = df.le(idx_ser, axis=0)
col_le = df.le(col_ser)
tm.assert_frame_equal(col_gt, df > pd.Series(col_ser))
tm.assert_frame_equal(col_gt, -col_le)
tm.assert_frame_equal(idx_gt, -idx_le)
tm.assert_frame_equal(idx_gt, df.T.gt(idx_ser).T)
idx_ge = df.ge(idx_ser, axis=0)
col_ge = df.ge(col_ser)
idx_lt = df.lt(idx_ser, axis=0)
col_lt = df.lt(col_ser)
tm.assert_frame_equal(col_ge, df >= pd.Series(col_ser))
tm.assert_frame_equal(col_ge, -col_lt)
tm.assert_frame_equal(idx_ge, -idx_lt)
tm.assert_frame_equal(idx_ge, df.T.ge(idx_ser).T)
idx_ser = pd.Series(np.random.randn(5))
col_ser = pd.Series(np.random.randn(3))
_test_seq(df, idx_ser, col_ser)
# list/tuple
_test_seq(df, idx_ser.values, col_ser.values)
# NA
df.loc[0, 0] = np.nan
rs = df.eq(df)
assert not rs.loc[0, 0]
rs = df.ne(df)
assert rs.loc[0, 0]
rs = df.gt(df)
assert not rs.loc[0, 0]
rs = df.lt(df)
assert not rs.loc[0, 0]
rs = df.ge(df)
assert not rs.loc[0, 0]
rs = df.le(df)
assert not rs.loc[0, 0]
def test_bool_flex_frame_complex_dtype(self):
# complex
arr = np.array([np.nan, 1, 6, np.nan])
arr2 = np.array([2j, np.nan, 7, None])
df = pd.DataFrame({"a": arr})
df2 = pd.DataFrame({"a": arr2})
msg = "|".join(
[
"'>' not supported between instances of '.*' and 'complex'",
r"unorderable types: .*complex\(\)", # PY35
]
)
with pytest.raises(TypeError, match=msg):
# inequalities are not well-defined for complex numbers
df.gt(df2)
with pytest.raises(TypeError, match=msg):
# regression test that we get the same behavior for Series
df["a"].gt(df2["a"])
with pytest.raises(TypeError, match=msg):
# Check that we match numpy behavior here
df.values > df2.values
rs = df.ne(df2)
assert rs.values.all()
arr3 = np.array([2j, np.nan, None])
df3 = pd.DataFrame({"a": arr3})
with pytest.raises(TypeError, match=msg):
# inequalities are not well-defined for complex numbers
df3.gt(2j)
with pytest.raises(TypeError, match=msg):
# regression test that we get the same behavior for Series
df3["a"].gt(2j)
with pytest.raises(TypeError, match=msg):
# Check that we match numpy behavior here
df3.values > 2j
def test_bool_flex_frame_object_dtype(self):
# corner, dtype=object
df1 = pd.DataFrame({"col": ["foo", np.nan, "bar"]})
df2 = pd.DataFrame({"col": ["foo", datetime.now(), "bar"]})
result = df1.ne(df2)
exp = pd.DataFrame({"col": [False, True, False]})
tm.assert_frame_equal(result, exp)
def test_flex_comparison_nat(self):
# GH 15697, GH 22163 df.eq(pd.NaT) should behave like df == pd.NaT,
# and _definitely_ not be NaN
df = pd.DataFrame([pd.NaT])
result = df == pd.NaT
# result.iloc[0, 0] is a np.bool_ object
assert result.iloc[0, 0].item() is False
result = df.eq(pd.NaT)
assert result.iloc[0, 0].item() is False
result = df != pd.NaT
assert result.iloc[0, 0].item() is True
result = df.ne(pd.NaT)
assert result.iloc[0, 0].item() is True
@pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"])
def test_df_flex_cmp_constant_return_types(self, opname):
# GH 15077, non-empty DataFrame
df = pd.DataFrame({"x": [1, 2, 3], "y": [1.0, 2.0, 3.0]})
const = 2
result = getattr(df, opname)(const).dtypes.value_counts()
tm.assert_series_equal(result, pd.Series([2], index=[np.dtype(bool)]))
@pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"])
def test_df_flex_cmp_constant_return_types_empty(self, opname):
# GH 15077 empty DataFrame
df = pd.DataFrame({"x": [1, 2, 3], "y": [1.0, 2.0, 3.0]})
const = 2
empty = df.iloc[:0]
result = getattr(empty, opname)(const).dtypes.value_counts()
tm.assert_series_equal(result, pd.Series([2], index=[np.dtype(bool)]))
def test_df_flex_cmp_ea_dtype_with_ndarray_series(self):
ii = pd.IntervalIndex.from_breaks([1, 2, 3])
df = pd.DataFrame({"A": ii, "B": ii})
ser = pd.Series([0, 0])
res = df.eq(ser, axis=0)
expected = pd.DataFrame({"A": [False, False], "B": [False, False]})
tm.assert_frame_equal(res, expected)
ser2 = pd.Series([1, 2], index=["A", "B"])
res2 = df.eq(ser2, axis=1)
tm.assert_frame_equal(res2, expected)
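    # Quick illustration (not an executable test): the flex comparison methods mirror
    # the comparison operators but accept an axis and list/Series operands, e.g.
    #
    #   df = pd.DataFrame({"A": [1, 2], "B": [3, 4]})
    #   df.eq(pd.Series([1, 4], index=["A", "B"]), axis=1)   # same as df == [1, 4]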
# -------------------------------------------------------------------
# Arithmetic
class TestFrameFlexArithmetic:
def test_floordiv_axis0(self):
# make sure we df.floordiv(ser, axis=0) matches column-wise result
arr = np.arange(3)
ser = pd.Series(arr)
df = pd.DataFrame({"A": ser, "B": ser})
result = df.floordiv(ser, axis=0)
expected = pd.DataFrame({col: df[col] // ser for col in df.columns})
tm.assert_frame_equal(result, expected)
result2 = df.floordiv(ser.values, axis=0)
tm.assert_frame_equal(result2, expected)
@pytest.mark.skipif(not _NUMEXPR_INSTALLED, reason="numexpr not installed")
@pytest.mark.parametrize("opname", ["floordiv", "pow"])
def test_floordiv_axis0_numexpr_path(self, opname):
# case that goes through numexpr and has to fall back to masked_arith_op
op = getattr(operator, opname)
arr = np.arange(_MIN_ELEMENTS + 100).reshape(_MIN_ELEMENTS // 100 + 1, -1) * 100
df = pd.DataFrame(arr)
df["C"] = 1.0
ser = df[0]
result = getattr(df, opname)(ser, axis=0)
expected = pd.DataFrame({col: op(df[col], ser) for col in df.columns})
tm.assert_frame_equal(result, expected)
result2 = getattr(df, opname)(ser.values, axis=0)
tm.assert_frame_equal(result2, expected)
def test_df_add_td64_columnwise(self):
# GH 22534 Check that column-wise addition broadcasts correctly
dti = pd.date_range("2016-01-01", periods=10)
tdi = pd.timedelta_range("1", periods=10)
tser = pd.Series(tdi)
df = pd.DataFrame({0: dti, 1: tdi})
result = df.add(tser, axis=0)
expected = pd.DataFrame({0: dti + tdi, 1: tdi + tdi})
tm.assert_frame_equal(result, expected)
def test_df_add_flex_filled_mixed_dtypes(self):
# GH 19611
dti = pd.date_range("2016-01-01", periods=3)
ser = pd.Series(["1 Day", "NaT", "2 Days"], dtype="timedelta64[ns]")
df = pd.DataFrame({"A": dti, "B": ser})
other = pd.DataFrame({"A": ser, "B": ser})
fill = pd.Timedelta(days=1).to_timedelta64()
result = df.add(other, fill_value=fill)
expected = pd.DataFrame(
{
"A": pd.Series(
["2016-01-02", "2016-01-03", "2016-01-05"], dtype="datetime64[ns]"
),
"B": ser * 2,
}
)
tm.assert_frame_equal(result, expected)
def test_arith_flex_frame(
self, all_arithmetic_operators, float_frame, mixed_float_frame
):
# one instance of parametrized fixture
op = all_arithmetic_operators
def f(x, y):
# r-versions not in operator-stdlib; get op without "r" and invert
if op.startswith("__r"):
return getattr(operator, op.replace("__r", "__"))(y, x)
return getattr(operator, op)(x, y)
result = getattr(float_frame, op)(2 * float_frame)
expected = f(float_frame, 2 * float_frame)
tm.assert_frame_equal(result, expected)
# vs mix float
result = getattr(mixed_float_frame, op)(2 * mixed_float_frame)
expected = f(mixed_float_frame, 2 * mixed_float_frame)
tm.assert_frame_equal(result, expected)
_check_mixed_float(result, dtype=dict(C=None))
@pytest.mark.parametrize("op", ["__add__", "__sub__", "__mul__"])
def test_arith_flex_frame_mixed(
self, op, int_frame, mixed_int_frame, mixed_float_frame
):
f = getattr(operator, op)
# vs mix int
result = getattr(mixed_int_frame, op)(2 + mixed_int_frame)
expected = f(mixed_int_frame, 2 + mixed_int_frame)
# no overflow in the uint
dtype = None
if op in ["__sub__"]:
dtype = dict(B="uint64", C=None)
elif op in ["__add__", "__mul__"]:
dtype = dict(C=None)
tm.assert_frame_equal(result, expected)
_check_mixed_int(result, dtype=dtype)
# vs mix float
result = getattr(mixed_float_frame, op)(2 * mixed_float_frame)
expected = f(mixed_float_frame, 2 * mixed_float_frame)
tm.assert_frame_equal(result, expected)
_check_mixed_float(result, dtype=dict(C=None))
# vs plain int
result = getattr(int_frame, op)(2 * int_frame)
expected = f(int_frame, 2 * int_frame)
tm.assert_frame_equal(result, expected)
def test_arith_flex_frame_raise(self, all_arithmetic_operators, float_frame):
# one instance of parametrized fixture
op = all_arithmetic_operators
# Check that arrays with dim >= 3 raise
for dim in range(3, 6):
arr = np.ones((1,) * dim)
msg = "Unable to coerce to Series/DataFrame"
with pytest.raises(ValueError, match=msg):
getattr(float_frame, op)(arr)
def test_arith_flex_frame_corner(self, float_frame):
const_add = float_frame.add(1)
tm.assert_frame_equal(const_add, float_frame + 1)
# corner cases
result = float_frame.add(float_frame[:0])
tm.assert_frame_equal(result, float_frame * np.nan)
result = float_frame[:0].add(float_frame)
tm.assert_frame_equal(result, float_frame * np.nan)
with pytest.raises(NotImplementedError, match="fill_value"):
float_frame.add(float_frame.iloc[0], fill_value=3)
with pytest.raises(NotImplementedError, match="fill_value"):
float_frame.add(float_frame.iloc[0], axis="index", fill_value=3)
def test_arith_flex_series(self, simple_frame):
df = simple_frame
row = df.xs("a")
col = df["two"]
# after arithmetic refactor, add truediv here
ops = ["add", "sub", "mul", "mod"]
for op in ops:
f = getattr(df, op)
op = getattr(operator, op)
tm.assert_frame_equal(f(row), op(df, row))
tm.assert_frame_equal(f(col, axis=0), op(df.T, col).T)
# special case for some reason
tm.assert_frame_equal(df.add(row, axis=None), df + row)
# cases which will be refactored after big arithmetic refactor
tm.assert_frame_equal(df.div(row), df / row)
tm.assert_frame_equal(df.div(col, axis=0), (df.T / col).T)
# broadcasting issue in GH 7325
df = pd.DataFrame(np.arange(3 * 2).reshape((3, 2)), dtype="int64")
expected = pd.DataFrame([[np.nan, np.inf], [1.0, 1.5], [1.0, 1.25]])
result = df.div(df[0], axis="index")
tm.assert_frame_equal(result, expected)
df = pd.DataFrame(np.arange(3 * 2).reshape((3, 2)), dtype="float64")
expected = pd.DataFrame([[np.nan, np.inf], [1.0, 1.5], [1.0, 1.25]])
result = df.div(df[0], axis="index")
tm.assert_frame_equal(result, expected)
def test_arith_flex_zero_len_raises(self):
# GH 19522 passing fill_value to frame flex arith methods should
# raise even in the zero-length special cases
ser_len0 = pd.Series([], dtype=object)
df_len0 = pd.DataFrame(columns=["A", "B"])
df = pd.DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
with pytest.raises(NotImplementedError, match="fill_value"):
df.add(ser_len0, fill_value="E")
with pytest.raises(NotImplementedError, match="fill_value"):
df_len0.sub(df["A"], axis=None, fill_value=3)
def test_flex_add_scalar_fill_value(self):
# GH#12723
dat = np.array([0, 1, np.nan, 3, 4, 5], dtype="float")
df = pd.DataFrame({"foo": dat}, index=range(6))
exp = df.fillna(0).add(2)
res = df.add(2, fill_value=0)
tm.assert_frame_equal(res, exp)
class TestFrameArithmetic:
def test_td64_op_nat_casting(self):
# Make sure we don't accidentally treat timedelta64(NaT) as datetime64
# when calling dispatch_to_series in DataFrame arithmetic
ser = pd.Series(["NaT", "NaT"], dtype="timedelta64[ns]")
df = pd.DataFrame([[1, 2], [3, 4]])
result = df * ser
expected = pd.DataFrame({0: ser, 1: ser})
tm.assert_frame_equal(result, expected)
def test_df_add_2d_array_rowlike_broadcasts(self):
# GH#23000
arr = np.arange(6).reshape(3, 2)
df = pd.DataFrame(arr, columns=[True, False], index=["A", "B", "C"])
rowlike = arr[[1], :] # shape --> (1, ncols)
assert rowlike.shape == (1, df.shape[1])
expected = pd.DataFrame(
[[2, 4], [4, 6], [6, 8]],
columns=df.columns,
index=df.index,
# specify dtype explicitly to avoid failing
# on 32bit builds
dtype=arr.dtype,
)
result = df + rowlike
tm.assert_frame_equal(result, expected)
result = rowlike + df
tm.assert_frame_equal(result, expected)
def test_df_add_2d_array_collike_broadcasts(self):
# GH#23000
arr = np.arange(6).reshape(3, 2)
df = pd.DataFrame(arr, columns=[True, False], index=["A", "B", "C"])
collike = arr[:, [1]] # shape --> (nrows, 1)
assert collike.shape == (df.shape[0], 1)
expected = pd.DataFrame(
[[1, 2], [5, 6], [9, 10]],
columns=df.columns,
index=df.index,
# specify dtype explicitly to avoid failing
# on 32bit builds
dtype=arr.dtype,
)
result = df + collike
tm.assert_frame_equal(result, expected)
result = collike + df
tm.assert_frame_equal(result, expected)
def test_df_arith_2d_array_rowlike_broadcasts(self, all_arithmetic_operators):
# GH#23000
opname = all_arithmetic_operators
arr = np.arange(6).reshape(3, 2)
df = pd.DataFrame(arr, columns=[True, False], index=["A", "B", "C"])
rowlike = arr[[1], :] # shape --> (1, ncols)
assert rowlike.shape == (1, df.shape[1])
exvals = [
getattr(df.loc["A"], opname)(rowlike.squeeze()),
getattr(df.loc["B"], opname)(rowlike.squeeze()),
getattr(df.loc["C"], opname)(rowlike.squeeze()),
]
expected = pd.DataFrame(exvals, columns=df.columns, index=df.index)
result = getattr(df, opname)(rowlike)
tm.assert_frame_equal(result, expected)
def test_df_arith_2d_array_collike_broadcasts(self, all_arithmetic_operators):
# GH#23000
opname = all_arithmetic_operators
arr = np.arange(6).reshape(3, 2)
df = pd.DataFrame(arr, columns=[True, False], index=["A", "B", "C"])
collike = arr[:, [1]] # shape --> (nrows, 1)
assert collike.shape == (df.shape[0], 1)
exvals = {
True: getattr(df[True], opname)(collike.squeeze()),
False: getattr(df[False], opname)(collike.squeeze()),
}
dtype = None
if opname in ["__rmod__", "__rfloordiv__"]:
# Series ops may return mixed int/float dtypes in cases where
# DataFrame op will return all-float. So we upcast `expected`
dtype = np.common_type(*[x.values for x in exvals.values()])
expected = pd.DataFrame(exvals, columns=df.columns, index=df.index, dtype=dtype)
result = getattr(df, opname)(collike)
tm.assert_frame_equal(result, expected)
def test_df_bool_mul_int(self):
# GH 22047, GH 22163 multiplication by 1 should result in int dtype,
# not object dtype
df = pd.DataFrame([[False, True], [False, False]])
result = df * 1
# On appveyor this comes back as np.int32 instead of np.int64,
# so we check dtype.kind instead of just dtype
kinds = result.dtypes.apply(lambda x: x.kind)
assert (kinds == "i").all()
result = 1 * df
kinds = result.dtypes.apply(lambda x: x.kind)
assert (kinds == "i").all()
def test_arith_mixed(self):
left = pd.DataFrame({"A": ["a", "b", "c"], "B": [1, 2, 3]})
result = left + left
expected = pd.DataFrame({"A": ["aa", "bb", "cc"], "B": [2, 4, 6]})
tm.assert_frame_equal(result, expected)
def test_arith_getitem_commute(self):
df = pd.DataFrame({"A": [1.1, 3.3], "B": [2.5, -3.9]})
def _test_op(df, op):
result = op(df, 1)
if not df.columns.is_unique:
raise ValueError("Only unique columns supported by this test")
for col in result.columns:
tm.assert_series_equal(result[col], op(df[col], 1))
_test_op(df, operator.add)
_test_op(df, operator.sub)
_test_op(df, operator.mul)
_test_op(df, operator.truediv)
_test_op(df, operator.floordiv)
_test_op(df, operator.pow)
_test_op(df, lambda x, y: y + x)
_test_op(df, lambda x, y: y - x)
_test_op(df, lambda x, y: y * x)
_test_op(df, lambda x, y: y / x)
_test_op(df, lambda x, y: y ** x)
_test_op(df, lambda x, y: x + y)
_test_op(df, lambda x, y: x - y)
_test_op(df, lambda x, y: x * y)
_test_op(df, lambda x, y: x / y)
_test_op(df, lambda x, y: x ** y)
@pytest.mark.parametrize(
"values", [[1, 2], (1, 2), np.array([1, 2]), range(1, 3), deque([1, 2])]
)
def test_arith_alignment_non_pandas_object(self, values):
# GH#17901
df = pd.DataFrame({"A": [1, 1], "B": [1, 1]})
expected = pd.DataFrame({"A": [2, 2], "B": [3, 3]})
result = df + values
tm.assert_frame_equal(result, expected)
def test_arith_non_pandas_object(self):
df = pd.DataFrame(
np.arange(1, 10, dtype="f8").reshape(3, 3),
columns=["one", "two", "three"],
index=["a", "b", "c"],
)
val1 = df.xs("a").values
added = pd.DataFrame(df.values + val1, index=df.index, columns=df.columns)
tm.assert_frame_equal(df + val1, added)
added = pd.DataFrame((df.values.T + val1).T, index=df.index, columns=df.columns)
tm.assert_frame_equal(df.add(val1, axis=0), added)
val2 = list(df["two"])
added = pd.DataFrame(df.values + val2, index=df.index, columns=df.columns)
tm.assert_frame_equal(df + val2, added)
added = pd.DataFrame((df.values.T + val2).T, index=df.index, columns=df.columns)
tm.assert_frame_equal(df.add(val2, axis="index"), added)
val3 = np.random.rand(*df.shape)
added = pd.DataFrame(df.values + val3, index=df.index, columns=df.columns)
tm.assert_frame_equal(df.add(val3), added)
def test_operations_with_interval_categories_index(self, all_arithmetic_operators):
# GH#27415
op = all_arithmetic_operators
ind = pd.CategoricalIndex(pd.interval_range(start=0.0, end=2.0))
data = [1, 2]
df = pd.DataFrame([data], columns=ind)
num = 10
result = getattr(df, op)(num)
expected = pd.DataFrame([[getattr(n, op)(num) for n in data]], columns=ind)
tm.assert_frame_equal(result, expected)
def test_frame_with_frame_reindex(self):
# GH#31623
df = pd.DataFrame(
{
"foo": [pd.Timestamp("2019"), pd.Timestamp("2020")],
"bar": [pd.Timestamp("2018"), pd.Timestamp("2021")],
},
columns=["foo", "bar"],
)
df2 = df[["foo"]]
result = df - df2
expected = pd.DataFrame(
{"foo": [pd.Timedelta(0), pd.Timedelta(0)], "bar": [np.nan, np.nan]},
columns=["bar", "foo"],
)
tm.assert_frame_equal(result, expected)
def test_frame_with_zero_len_series_corner_cases():
# GH#28600
# easy all-float case
df = pd.DataFrame(np.random.randn(6).reshape(3, 2), columns=["A", "B"])
ser = pd.Series(dtype=np.float64)
result = df + ser
expected = pd.DataFrame(df.values * np.nan, columns=df.columns)
tm.assert_frame_equal(result, expected)
result = df == ser
expected = pd.DataFrame(False, index=df.index, columns=df.columns)
tm.assert_frame_equal(result, expected)
# non-float case should not raise on comparison
df2 = pd.DataFrame(df.values.view("M8[ns]"), columns=df.columns)
result = df2 == ser
expected = pd.DataFrame(False, index=df.index, columns=df.columns)
tm.assert_frame_equal(result, expected)
def test_zero_len_frame_with_series_corner_cases():
# GH#28600
df = pd.DataFrame(columns=["A", "B"], dtype=np.float64)
ser = pd.Series([1, 2], index=["A", "B"])
result = df + ser
expected = df
tm.assert_frame_equal(result, expected)
def test_frame_single_columns_object_sum_axis_1():
# GH 13758
data = {
"One": pd.Series(["A", 1.2, np.nan]),
}
df = pd.DataFrame(data)
result = df.sum(axis=1)
expected = pd.Series(["A", 1.2, 0])
tm.assert_series_equal(result, expected)
# -------------------------------------------------------------------
# Unsorted
# These arithmetic tests were previously in other files, eventually
# should be parametrized and put into tests.arithmetic
class TestFrameArithmeticUnsorted:
def test_frame_add_tz_mismatch_converts_to_utc(self):
rng = pd.date_range("1/1/2011", periods=10, freq="H", tz="US/Eastern")
df = pd.DataFrame(np.random.randn(len(rng)), index=rng, columns=["a"])
df_moscow = df.tz_convert("Europe/Moscow")
result = df + df_moscow
assert result.index.tz is pytz.utc
result = df_moscow + df
assert result.index.tz is pytz.utc
def test_align_frame(self):
rng = pd.period_range("1/1/2000", "1/1/2010", freq="A")
import os
import numpy as np
import pandas as pd
import seaborn as sns
from scipy.stats import ttest_ind
from scipy.stats import bartlett
import matplotlib.pyplot as plt
from sklearn.model_selection import KFold
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix
radiomics_path = './results/radiomics/radiomics.csv'
name_mapping_path = './data/name_mapping.csv'
if __name__ == '__main__':
mapping = pd.read_csv(name_mapping_path, header=0)[['Grade', 'BraTS_2019_subject_ID']]
radiomics = pd.read_csv(radiomics_path, header=0, index_col=0)
df = radiomics.merge(mapping, left_index=True, right_on='BraTS_2019_subject_ID')
print(df.groupby('Grade').mean())
print(df.groupby('Grade').var())
lgg = df[df['Grade'] == 'LGG']
hgg = df[df['Grade'] == 'HGG']
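# Bartlett's test below checks whether the two grade groups share a common
# variance; since equal variances are not assumed, the subsequent ttest_ind
# is run as Welch's t-test (equal_var=False).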
print(bartlett(lgg['ALL_VoxelVolume'], hgg['ALL_VoxelVolume']))
print(ttest_ind(lgg['ALL_VoxelVolume'], hgg['ALL_VoxelVolume'], equal_var=False))
df.groupby('Grade')['ALL_VoxelVolume'].apply(
lambda x: sns.distplot(x, bins=50, hist=True, rug=False, label=x.name)
)
plt.xlabel('Voxel volume')
plt.ylabel('KDE')
plt.show()
y = np.array(df['Grade'])
X = np.array(df.loc[:, ['ALL_VoxelVolume']])
random_state = 1173
kf = KFold(n_splits=5, shuffle=True, random_state=random_state)
accuracy_list = []
precision_list = []
sensitivity_list = []
specificity_list = []
npv_list = []
for train_index, test_index in kf.split(X):
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
clf = LogisticRegression(random_state=random_state).fit(X_train, y_train)
y_pred = clf.predict(X_test)
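# Note: with string labels and no explicit `labels=` argument, sklearn's
# confusion_matrix orders the classes alphabetically, so 'HGG' is treated as
# the negative class and 'LGG' as the positive class in the unpacking below.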
tn, fp, fn, tp = confusion_matrix(y_test, y_pred).ravel()
accuracy = (tp + tn) / (tp + tn + fp + fn)
precision = tp / (tp + fp)
sensitivity = tp / (tp + fn)
specificity = tn / (tn + fp)
npv = tn / (tn + fn)
accuracy_list.append(accuracy)
precision_list.append(precision)
sensitivity_list.append(sensitivity)
specificity_list.append(specificity)
npv_list.append(npv)
result = {
'accuracy_mean': [np.mean(accuracy_list)],
'accuracy_std': [np.std(accuracy_list)],
'precision_mean': [np.mean(precision_list)],
'precision_std': [np.std(precision_list)],
'sensitivity_mean': [np.mean(sensitivity_list)],
'sensitivity_std': [np.std(sensitivity_list)],
'specificity_mean': [np.mean(specificity_list)],
'specificity_std': [np.std(specificity_list)],
'npv_mean': [np.mean(npv_list)],
'npv_std': [np.std(npv_list)],
}
df = pd.DataFrame(data=result)
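# Quick sanity check of the metric formulas above on a toy confusion matrix
# (illustrative numbers only, not results from this dataset):
# tn, fp, fn, tp = 40, 10, 5, 45 gives
# accuracy = (45 + 40) / 100 = 0.85, precision = 45 / 55 ~ 0.82,
# sensitivity = 45 / 50 = 0.90, specificity = 40 / 50 = 0.80, npv = 40 / 45 ~ 0.89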
# $Id$
# $HeadURL$
################################################################
# The contents of this file are subject to the BSD 3Clause (New) License;
# you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://directory.fsf.org/wiki/License:BSD_3Clause
# Software distributed under the License is distributed on an "AS IS"
# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
# License for the specific language governing rights and limitations
# under the License.
# The Original Code is part of the PyRadi toolkit.
# The Initial Developer of the Original Code is <NAME>,
# Portions created by <NAME> are Copyright (C) 2006-2012
# All Rights Reserved.
# Contributor(s): <NAME>, <NAME>, <NAME>, <NAME>
################################################################
"""
This module provides functions for plotting cartesian and polar plots.
This class provides a basic plotting capability, with a minimum
number of lines. These are all wrapper functions,
based on existing functions in other Python classes.
Provision is made for combinations of linear and log scales, as well
as polar plots for two-dimensional graphs.
The Plotter class can save files to disk in a number of formats.
For more examples of use see:
https://github.com/NelisW/ComputationalRadiometry
See the __main__ function for examples of use.
This package was partly developed to provide additional material in support of students
and readers of the book Electro-Optical System Analysis and Design: A Radiometry
Perspective, <NAME>, ISBN 9780819495693, SPIE Monograph Volume
PM236, SPIE Press, 2013. http://spie.org/x648.html?product_id=2021423&origin_id=x646
"""
__version__ = "$Revision$"
__author__ = 'pyradi team'
__all__ = ['Plotter','cubehelixcmap', 'FilledMarker', 'Markers','ProcessImage',
'savePlot']
import numpy as np
import pandas as pd
import math
import sys
import itertools
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib.font_manager import FontProperties
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.dates as mdates
from mpl_toolkits.axes_grid1 import make_axes_locatable
# following for the pie plots
from matplotlib.transforms import Affine2D
import mpl_toolkits.axisartist.floating_axes as floating_axes
import mpl_toolkits.axisartist.angle_helper as angle_helper
from matplotlib.projections import PolarAxes
from mpl_toolkits.axisartist.grid_finder import MaxNLocator
from matplotlib.ticker import FormatStrFormatter
from matplotlib.colors import LinearSegmentedColormap as LSC
# see if plotly is available
try:
__import__('plotly.tools')
imported_plotly = True
from plotly import tools
from plotly.offline import download_plotlyjs, offline
from plotly.graph_objs import Scatter, Layout, Figure,Scatter3d,Mesh3d,ColorBar,Contour
except ImportError:
imported_plotly = False
from datetime import datetime
####################################################################
##
class FilledMarker:
"""Filled marker user-settable values.
This class encapsulates a few variables describing a Filled marker.
Default values are provided that can be overridden in user plots.
Values relevant to filled makers are as follows:
| marker = ['o', 'v', '^', '<', '>', '8', 's', 'p', '*', 'h', 'H', 'D', 'd']
| fillstyle = ['full', 'left', 'right', 'bottom', 'top', 'none']
| colour names = http://www.w3schools.com/html/html_colornames.asp
"""
def __init__(self, markerfacecolor=None, markerfacecoloralt=None,
markeredgecolor=None, marker=None, markersize=None,
fillstyle=None):
"""Define marker default values.
Args:
| markerfacecolor (colour): main colour for marker (optional)
| markerfacecoloralt (colour): alternative colour for marker (optional)
| markeredgecolor (colour): edge colour for marker (optional)
| marker (string): string to specify the marker (optional)
| markersize (int)): size of the marker (optional)
| fillstyle (string): string to define fill style (optional)
Returns:
| Nothing. Creates the figure for subsequent use.
Raises:
| No exception is raised.
"""
__all__ = ['__init__']
if markerfacecolor is None:
self.markerfacecolor = 'r'
else:
self.markerfacecolor = markerfacecolor
if markerfacecoloralt is None:
self.markerfacecoloralt = 'b'
else:
self.markerfacecoloralt = markerfacecoloralt
if markeredgecolor is None:
self.markeredgecolor = 'k'
else:
self.markeredgecolor = markeredgecolor
if marker is None:
self.marker = 'o'
else:
self.marker = marker
if markersize is None:
self.markersize = 20
else:
self.markersize = markersize
if fillstyle is None:
self.fillstyle = 'full'
else:
self.fillstyle = fillstyle
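# Usage sketch (comment only; the colour and style values are illustrative):
#
# fm = FilledMarker(markerfacecolor='y', marker='^', markersize=12, fillstyle='top')
#
# The Markers class below constructs FilledMarker instances like this internally
# for every point added with Markers.add().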
###################################################################################
###################################################################################
class Markers:
"""Collect marker location and types and mark subplot.
Build a list of markers at plot locations with the specified marker.
"""
####################################################################
##
def __init__(self, markerfacecolor = None, markerfacecoloralt = None,
markeredgecolor = None, marker = None, markersize = None,
fillstyle = None):
"""Set default marker values for this collection
Specify default marker properties to be used for all markers
in this instance. If no marker properties are specified here,
the default FilledMarker marker properties will be used.
Args:
| markerfacecolor (colour): main colour for marker (optional)
| markerfacecoloralt (colour): alternative colour for marker (optional)
| markeredgecolor (colour): edge colour for marker (optional)
| marker (string): string to specify the marker (optional)
| markersize (int)): size of the marker (optional)
| fillstyle (string): string to define fill style (optional)
Returns:
| Nothing. Creates the figure for subsequent use.
Raises:
| No exception is raised.
"""
__all__ = ['__init__', 'add', 'plot']
if markerfacecolor is None:
self.markerfacecolor = None
else:
self.markerfacecolor = markerfacecolor
if markerfacecoloralt is None:
self.markerfacecoloralt = None
else:
self.markerfacecoloralt = markerfacecoloralt
if markeredgecolor is None:
self.markeredgecolor = None
else:
self.markeredgecolor = markeredgecolor
if marker is None:
self.marker = None
else:
self.marker = marker
if markersize is None:
self.markersize = markersize
else:
self.markersize = markersize
if fillstyle is None:
self.fillstyle = None
else:
self.fillstyle = fillstyle
#list if markers to be drawn
self.markers = []
####################################################################
##
def add(self,x,y,markerfacecolor = None, markerfacecoloralt = None,
markeredgecolor = None, marker = None, markersize = None,
fillstyle = None):
"""Add a marker to the list, overridding properties if necessary.
Specify location and any specific marker properties to be used.
The location can be (x,y) for cartesian plots or (theta,rad) for polars.
If no marker properties are specified, the current marker class
properties will be used. If the current marker instance does not
specify properties, the default marker properties will be used.
Args:
| x (float): the x/theta location for the marker
| y (float): the y/radial location for the marker
| markerfacecolor (colour): main colour for marker (optional)
| markerfacecoloralt (colour): alternative colour for marker (optional)
| markeredgecolor (colour): edge colour for marker (optional)
| marker (string): string to specify the marker (optional)
| markersize (int)): size of the marker (optional)
| fillstyle (string): string to define fill style (optional)
Returns:
| Nothing. Creates the figure for subsequent use.
Raises:
| No exception is raised.
"""
if markerfacecolor is None:
if self.markerfacecolor is not None:
markerfacecolor = self.markerfacecolor
if markerfacecoloralt is None:
if self.markerfacecoloralt is not None:
markerfacecoloralt = self.markerfacecoloralt
if markeredgecolor is None:
if self.markeredgecolor is not None:
markeredgecolor = self.markeredgecolor
if marker is None:
if self.marker is not None:
marker = self.marker
if markersize is None:
if self.markersize is not None:
markersize = self.markersize
if fillstyle is None:
if self.fillstyle is not None:
fillstyle = self.fillstyle
marker = FilledMarker(markerfacecolor, markerfacecoloralt ,
markeredgecolor , marker, markersize , fillstyle)
self.markers.append((x,y,marker))
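# Usage sketch (comment only; `ax` is assumed to be an existing Matplotlib axes):
#
# m = Markers(markerfacecolor='r', marker='*')   # defaults for this collection
# m.add(1.0, 2.0)                                # uses the collection defaults
# m.add(3.0, 4.0, markerfacecolor='g', marker='o', markersize=10)
# m.plot(ax)                                     # draw all stored markers on ax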
####################################################################
##
def plot(self,ax):
"""Plot the current list of markers on the given axes.
All the markers currently stored in the class will be
drawn.
Args:
| ax (axes): an axes handle for the plot
Returns:
| Nothing. Creates the figure for subsequent use.
Raises:
| No exception is raised.
"""
usetex = plt.rcParams['text.usetex']
plt.rcParams['text.usetex'] = False # otherwise, '^' will cause trouble
for marker in self.markers:
ax.plot(marker[0], marker[1],
color = marker[2].markerfacecolor,
markerfacecoloralt = marker[2].markerfacecoloralt,
markeredgecolor = marker[2].markeredgecolor,
marker = marker[2].marker,
markersize = marker[2].markersize,
fillstyle = marker[2].fillstyle,
linewidth=0)
plt.rcParams['text.usetex'] = usetex
###################################################################################
###################################################################################
class ProcessImage:
"""This class provides functions to assist in the optimal display of images.
"""
#define the compression rule to be used in the equalisation function
compressSet = [
[lambda x : x , lambda x : x, 'Linear'],
[np.log, np.exp, 'Natural Log'],
[np.sqrt, np.square, 'Square Root']]
############################################################
def __init__(self):
"""Class constructor
Sets up some variables for use in this class
Args:
| None
Returns:
| Nothing
Raises:
| No exception is raised.
"""
__all__ = ['__init__', 'compressEqualizeImage', 'reprojectImageIntoPolar']
############################################################
def compressEqualizeImage(self, image, selectCompressSet=2, numCbarlevels=20,
cbarformat='.3f'):
"""Compress an image (and then inversely expand the colour bar values)
prior to histogram equalisation. To keep the image and its colour bar in
step, the compression/expansion function names are stored as pairs, and the
compression is selected as one of: linear, natural log, square root. Note
that the image is histogram equalised in all cases.
Args:
| image (np.ndarray): the image to be processed
| selectCompressSet (int): compression selection [0,1,2] (optional)
| numCbarlevels (int): number of labels in the colourbar (optional)
| cbarformat (string): colourbar label format, e.g., '10.3f', '.5e' (optional)
Returns:
| imgHEQ (np.ndarray): the equalised image array
| customticksz (zip(float, string)): colourbar levels and associated levels
Raises:
| No exception is raised.
"""
#compress the input image - rescale color bar tick to match below
#also collapse into single dimension
imgFlat = self.compressSet[selectCompressSet][0](image.flatten())
imgFlatSort = np.sort(imgFlat)
#cumulative distribution
cdf = imgFlatSort.cumsum()/imgFlatSort[-1]
#remap image values to achieve histogram equalisation
y=np.interp(imgFlat,imgFlatSort, cdf )
#and reshape to original image shape
imgHEQ = y.reshape(image.shape)
# #plot the histogram mapping
# minData = np.min(imgFlat)
# maxData = np.max(imgFlat)
# print('Image irradiance range minimum={0} maximum={1}'.format(minData, maxData))
# irradRange=np.linspace(minData, maxData, 100)
# normalRange = np.interp(irradRange,imgFlatSort, cdf )
# H = ryplot.Plotter(1, 1, 1,'Mapping Input Irradiance to Equalised Value',
# figsize=(10, 10))
# H.plot(1, "","Irradiance [W/(m$^2$)]", "Equalised value",irradRange,
# normalRange, powerLimits = [-4, 2, -10, 2])
# #H.getPlot().show()
# H.saveFig('cumhist{0}.png'.format(entry), dpi=300)
#prepare the color bar tick labels from image values (as plotted)
imgLevels = np.linspace(np.min(imgHEQ), np.max(imgHEQ), numCbarlevels)
#map back from image values to original values as read it (inverse to above)
irrLevels=np.interp(imgLevels,cdf, imgFlatSort)
#uncompress the tick labels - match with compression above
fstr = '{0:' + cbarformat + '}'
customticksz = list(zip(imgLevels, [fstr.format(self.compressSet[selectCompressSet][1](x)) for x in irrLevels]))
return imgHEQ, customticksz
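# Usage sketch (comment only; `img` stands for any 2-D image array and is an
# assumption of this example, not a name used elsewhere in this module):
#
# pi = ProcessImage()
# imgHEQ, customticksz = pi.compressEqualizeImage(img, selectCompressSet=1, numCbarlevels=10)
# # imgHEQ is the equalised image; customticksz pairs colour-bar levels with
# # labels expressed in the original (uncompressed) image values.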
##############################################################################
##
def reprojectImageIntoPolar(self, data, origin=None, framesFirst=True,cval=0.0):
"""Reprojects a 3D numpy array into a polar coordinate system, relative to some origin.
This function reprojects an image from cartesian to polar coordinates.
The origin of the new coordinate system defaults to the center of the image,
unless the user supplies a new origin.
The data format can be data.shape = (rows, cols, frames) or
data.shape = (frames, rows, cols), the format of which is indicated by the
framesFirst parameter.
The reprojectImageIntoPolar function maps radial to cartesian coords.
The radial image is, however, presented on a cartesian grid, so the corners have no meaning.
The radial coordinates are mapped to the radius, not to the corners.
This means that, in order to map the corners, the frequency must be scaled by sqrt(2).
The corners are filled with the value specified in cval.
Args:
| data (np.array): 3-D array to which transformation must be applied.
| origin ( (x-orig, y-orig) ): data-coordinates of where origin should be placed
| framesFirst (bool): True if data.shape is (frames, rows, cols), False if
data.shape is (rows, cols, frames)
| cval (float): the fill value to be used in coords outside the mapped range(optional)
Returns:
| output (float np.array): transformed images/array data in the same sequence as input sequence.
| r_i (np.array[N,]): radial values for returned image.
| theta_i (np.array[M,]): angular values for returned image.
Raises:
| No exception is raised.
original code by <NAME>
https://stackoverflow.com/questions/3798333/image-information-along-a-polar-coordinate-system
"""
import pyradi.ryutils as ryutils
# import scipy as sp
import scipy.ndimage as spndi
if framesFirst:
data = ryutils.framesLast(data)
ny, nx = data.shape[:2]
if origin is None:
origin = (nx//2, ny//2)
# Determine what the min and max r and theta coords will be
x, y = ryutils.index_coords(data, origin=origin, framesFirst=framesFirst )
r, theta = ryutils.cart2polar(x, y)
# Make a regular (in polar space) grid based on the min and max r & theta
r_i = np.linspace(r.min(), r.max(), nx)
theta_i = np.linspace(theta.min(), theta.max(), ny)
theta_grid, r_grid = np.meshgrid(theta_i, r_i)
# Project the r and theta grid back into pixel coordinates
xi, yi = ryutils.polar2cart(r_grid, theta_grid)
xi += origin[0] # We need to shift the origin back to
yi += origin[1] # back to the lower-left corner...
xi, yi = xi.flatten(), yi.flatten()
coords = np.vstack((xi, yi)) # (map_coordinates requires a 2xn array)
# Reproject each band individually and the restack
# (uses less memory than reprojection the 3-dimensional array in one step)
bands = []
for band in data.T:
zi = spndi.map_coordinates(band, coords, order=1,cval=cval)
bands.append(zi.reshape((nx, ny)))
output = np.dstack(bands)
if framesFirst:
output = ryutils.framesFirst(output)
return output, r_i, theta_i
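# Usage sketch (comment only; the array shape is illustrative):
#
# pi = ProcessImage()
# frames = np.random.rand(5, 128, 128)   # (frames, rows, cols)
# polar, r_i, theta_i = pi.reprojectImageIntoPolar(frames, framesFirst=True, cval=0.0)
# # polar keeps the input frame ordering; r_i and theta_i give the radial and
# # angular sample positions of the returned grid.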
###################################################################################
###################################################################################
class Plotter:
""" Encapsulates a plotting environment, optimized for compact code.
This class provides a wrapper around Matplotlib to provide a plotting
environment specialised towards typical pyradi visualisation.
These functions were developed to provide sophisticated plots by entering
the various plot options on a few lines, instead of typing many commands.
Provision is made for plots containing subplots (i.e., multiple plots on
the same figure), linear scale and log scale plots, images, and cartesian,
3-D and polar plots.
"""
############################################################
##
def __init__(self,fignumber=0,subpltnrow=1,subpltncol=1,\
figuretitle=None, figsize=(9,9), titlefontsize=14,
useplotly = False,doWarning=True):
"""Class constructor
The constructor defines the number for this figure, allowing future reference
to this figure. The number of subplot rows and columns allow the user to define
the subplot configuration. The user can also provide a title to be
used for the figure (centred on top) and finally, the size of the figure in inches
can be specified to scale the text relative to the figure.
Args:
| fignumber (int): the plt figure number, must be supplied
| subpltnrow (int): subplot number of rows
| subpltncol (int): subplot number of columns
| figuretitle (string): the overall heading for the figure
| figsize ((w,h)): the figure size in inches
| titlefontsize (int): the figure title size in points
| useplotly (bool): Plotly activation parameter
| doWarning (bool): print warning messages to the screen
Returns:
| Nothing. Creates the figure for subsequent use.
Raises:
| No exception is raised.
"""
__all__ = ['__init__', 'saveFig', 'getPlot', 'plot', 'logLog', 'semilogX',
'semilogY', 'polar', 'showImage', 'plot3d', 'buildPlotCol',
'getSubPlot', 'meshContour', 'nextPlotCol', 'plotArray',
'polarMesh', 'resetPlotCol', 'mesh3D', 'polar3d', 'labelSubplot',
'emptyPlot','setup_pie_axes','pie']
version=mpl.__version__.split('.')
vnum=float(version[0]+'.'+version[1])
if vnum<1.1:
print('Install Matplotlib 1.1 or later')
print('current version is {0}'.format(vnum))
sys.exit(-1)
self.figurenumber = fignumber
self.fig = plt.figure(self.figurenumber)
self.fig.set_size_inches(figsize[0], figsize[1])
self.fig.clear()
self.figuretitle = figuretitle
self.doWarning = doWarning
#Plotly variables initialization
self.useplotly = useplotly
if self.useplotly:
self.Plotlyfig = []
self.Plotlydata = []
self.Plotlylayout = []
self.PlotlyXaxisTitles = []
self.PlotlyYaxisTitles = []
self.PlotlySubPlotTitles = []
self.PlotlySubPlotLabels = []
self.PlotlySubPlotNumbers = []
self.PlotlyPlotCalls = 0
self.PLcolor=''
self.PLwidth=0
self.PLdash=''
self.PLmultiAxisTitle=''
self.PLmultipleYAxis=False
self.PLyAxisSide=''
self.PLyAxisOverlaying=''
self.PLmultipleXAxis=False
self.PLxAxisSide=''
self.PLxAxisOverlaying=''
self.PLIs3D=False
self.PLType=''
self.nrow=subpltnrow
self.ncol=subpltncol
# width reserved for space between subplots
self.fig.subplots_adjust(wspace=0.25)
#height reserved for space between subplots
self.fig.subplots_adjust(hspace=0.4)
#height reserved for top of the subplots of the figure
self.fig.subplots_adjust(top=0.88)
# define the default line colour and style
self.buildPlotCol(plotCol=None, n=None)
self.bbox_extra_artists = []
self.subplots = {}
self.gridSpecsOuter = {}
self.arrayRows = {}
self.gridSpecsInner = {}
if figuretitle:
self.figtitle=plt.gcf().text(.5,.95,figuretitle,\
horizontalalignment='center',\
fontproperties=FontProperties(size=titlefontsize))
self.bbox_extra_artists.append(self.figtitle)
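# Construction sketch (comment only; titles and sizes are illustrative):
#
# p = Plotter(1, 2, 2, figuretitle='Demo figure', figsize=(12, 8))
# # ...then call p.plot(1, x, y), p.plot(2, x, y2), etc., and finally
# # p.saveFig('demo.png') to write the figure to disk.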
############################################################
##
def buildPlotCol(self, plotCol=None, n=None):
"""Set a sequence of default colour styles of
appropriate length.
The constructor provides a default sequence of
25 pre-defined plot styles.
The user can define a new sequence if required.
This function modulus-folds either sequence, in
case longer sequences are required.
Colours can be one of the basic colours:
['b', 'g', 'r', 'c', 'm', 'y', 'k']
or it can be a gray shade float value between 0 and 1,
such as '0.75', or it can be in hex format '#eeefff'
or it can be one of the legal html colours.
See http://html-color-codes.info/ and
http://www.computerhope.com/htmcolor.htm.
http://latexcolor.com/
Args:
| plotCol ([strings]): User-supplied list
| of plotting styles(can be empty []).
| n (int): Length of required sequence.
Returns:
| A list with sequence of plot styles, of required length.
Raises:
| No exception is raised.
"""
# assemble the list as requested, use default if not specified
if plotCol is None:
plotCol = ['b', 'g', 'r', 'c', 'm', 'y', 'k',
'#5D8AA8','#E52B50','#FF7E00','#9966CC','#CD9575','#915C83',
'#008000','#4B5320','#B2BEB5','#A1CAF1','#FE6F5E','#333399',
'#DE5D83','#800020','#1E4D2B','#00BFFF','#007BA7','#FFBCD9']
if n is None:
n = len(plotCol)
self.plotCol = [plotCol[i % len(plotCol)] for i in range(n)]
# copy this to circular list as well
self.plotColCirc = itertools.cycle(self.plotCol)
return self.plotCol
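# Example of the modulus-folding behaviour (comment only):
#
# p.buildPlotCol(['r', 'g', 'b'], n=5)   # -> ['r', 'g', 'b', 'r', 'g']
# p.buildPlotCol(n=3)                    # -> first three entries of the default set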
############################################################
##
def nextPlotCol(self):
"""Returns the next entry in a sequence of default
plot line colour styles in a circular list.
One day I want to do this with a generator....
Args:
| None
Returns:
| The next plot colour in the sequence.
Raises:
| No exception is raised.
"""
col = next(self.plotColCirc)
return col
############################################################
##
def resetPlotCol(self):
"""Resets the plot colours to start at the beginning of
the cycle.
Args:
| None
Returns:
| None.
Raises:
| No exception is raised.
"""
self.plotColCirc = itertools.cycle(self.plotCol)
############################################################
##
def saveFig(self, filename='mpl.png',dpi=300,bbox_inches='tight',\
pad_inches=0.1, useTrueType = True):
"""Save the plot to a disk file, using filename, dpi specification and bounding box limits.
One of matplotlib's design choices is a bounding box strategy which may result in a bounding box
that is smaller than the size of all the objects on the page. It took a while to figure this out,
but the current default values for bbox_inches and pad_inches seem to create meaningful
bounding boxes. These are however larger than the true bounding box. You still need a
tool such as epstools or Adobe Acrobat to trim eps files to the true bounding box.
The type of file written is picked up in the filename.
Most backends support png, pdf, ps, eps and svg.
Args:
| filename (string): output filename to write plot, file ext
| dpi (int): the resolution of the graph in dots per inch
| bbox_inches: see matplotlib docs for more detail.
| pad_inches: see matplotlib docs for more detail.
| useTrueType: if True, truetype fonts are used in eps/pdf files, otherwise Type3
Returns:
| Nothing. Saves a file to disk.
Raises:
| No exception is raised.
"""
# http://matplotlib.1069221.n5.nabble.com/TrueType-font-embedding-in-eps-problem-td12691.html
# http://stackoverflow.com/questions/5956182/cannot-edit-text-in-chart-exported-by-matplotlib-and-opened-in-illustrator
# http://newsgroups.derkeiler.com/Archive/Comp/comp.soft-sys.matlab/2008-07/msg02038.html
if useTrueType:
mpl.rcParams['pdf.fonttype'] = 42
mpl.rcParams['ps.fonttype'] = 42
#http://stackoverflow.com/questions/15341757/how-to-check-that-pylab-backend-of-matplotlib-runs-inline/17826459#17826459
# print(mpl.get_backend())
if 'inline' in mpl.get_backend() and self.doWarning:
print('**** If saveFig does not work inside the notebook please comment out the line "%matplotlib inline" ')
print('To disable ryplot warnings, set doWarning=False')
# return
if len(filename)>0:
if self.bbox_extra_artists:
self.fig.savefig(filename, dpi=dpi, bbox_inches=bbox_inches,
pad_inches=pad_inches,\
bbox_extra_artists= self.bbox_extra_artists);
else:
self.fig.savefig(filename, dpi=dpi, bbox_inches=bbox_inches,
pad_inches=pad_inches);
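# Typical calls (comment only); the filename extension selects the output format:
#
# p.saveFig('result.png', dpi=300)
# p.saveFig('result.eps')   # then trim the true bounding box with epstool/Acrobat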
############################################################
##
def getPlot(self):
"""Returns a handle to the current figure
Args:
| None
Returns:
| A handle to the current figure.
Raises:
| No exception is raised.
"""
return self.fig
############################################################
##
def labelSubplot(self, spax, ptitle=None, xlabel=None, ylabel=None, zlabel=None,
titlefsize=10, labelfsize=10, ):
"""Set the sub-figure title and axes labels (cartesian plots only).
Args:
| spax (handle): subplot axis handle where labels must be drawn
| ptitle (string): plot title (optional)
| xlabel (string): x-axis label (optional)
| ylabel (string): y-axis label (optional)
| zlabel (string): z axis label (optional)
| titlefsize (float): title fontsize (optional)
| labelfsize (float): x,y,z label fontsize (optional)
Returns:
| None.
Raises:
| No exception is raised.
"""
if xlabel is not None:
spax.set_xlabel(xlabel,fontsize=labelfsize)
if ylabel is not None:
spax.set_ylabel(ylabel,fontsize=labelfsize)
if zlabel is not None:
spax.set_zlabel(zlabel,fontsize=labelfsize)  # z label only applies to 3-D axes
if ptitle is not None:
spax.set_title(ptitle,fontsize=titlefsize)
############################################################
##
def getSubPlot(self, subplotNum = 1):
"""Returns a handle to the subplot, as requested per subplot number.
Subplot numbers range from 1 upwards.
Args:
| subplotNum (int) : number of the subplot
Returns:
| A handle to the requested subplot or None if not found.
Raises:
| No exception is raised.
"""
if (self.nrow,self.ncol, subplotNum) in list(self.subplots.keys()):
return self.subplots[(self.nrow,self.ncol, subplotNum)]
else:
return None
############################################################
##
def getXLim(self, subplotNum = 1):
"""Returns the x limits of the current subplot.
Subplot numbers range from 1 upwards.
Args:
| subplotNum (int) : number of the subplot
Returns:
| An array with the two limits
Raises:
| No exception is raised.
"""
if (self.nrow,self.ncol, subplotNum) in list(self.subplots.keys()):
return np.asarray(self.subplots[(self.nrow,self.ncol, subplotNum)].get_xlim())
else:
return None
############################################################
##
def getYLim(self, subplotNum = 1):
"""Returns the y limits of the current subplot.
Subplot numbers range from 1 upwards.
Args:
| subplotNum (int) : number of the subplot
Returns:
| An array with the two limits
Raises:
| No exception is raised.
"""
if (self.nrow,self.ncol, subplotNum) in list(self.subplots.keys()):
return np.asarray(self.subplots[(self.nrow,self.ncol, subplotNum)].get_ylim())
else:
return None
############################################################
##
def verticalLineCoords(self,subplotNum=1,x=0):
"""Returns two arrays for vertical line at x in the specific subplot.
The line is drawn at specified x, with current y limits in subplot.
Subplot numbers range from 1 upwards.
Use as follows to draw a vertical line in plot:
p.plot(1,*p.verticalLineCoords(subplotNum=1,x=freq),plotCol=['k'])
Args:
| subplotNum (int) : number of the subplot
| x (double): horizontal value used for line
Returns:
| A tuple with two arrays for line (x-coords,y-coords)
Raises:
| No exception is raised.
"""
if (self.nrow,self.ncol, subplotNum) in list(self.subplots.keys()):
handle = self.subplots[(self.nrow,self.ncol, subplotNum)]
x = np.asarray((x,x))
y = self.getYLim(subplotNum)
return x,y
else:
return None
############################################################
##
def horizontalLineCoords(self,subplotNum=1,y=0):
"""Returns two arrays for horizontal line at y in the specific subplot.
The line is drawn at specified y, with current x limits in subplot.
Subplot numbers range from 1 upwards.
Use as follows to draw a horizontal line in plot:
p.plot(1,*p.horizontalLineCoords(subplotNum=1,y=freq),plotCol=['k'])
Args:
| subplotNum (int) : number of the subplot
| y (double): vertical (y) value at which the line is drawn
Returns:
| A tuple with two arrays for line (x-coords,y-coords)
Raises:
| No exception is raised.
"""
if (self.nrow,self.ncol, subplotNum) in list(self.subplots.keys()):
handle = self.subplots[(self.nrow,self.ncol, subplotNum)]
y = np.asarray((y,y))
x = self.getXLim(subplotNum)
return x,y
else:
return None
############################################################
##
def plot(self, plotnum, x, y, ptitle=None, xlabel=None, ylabel=None,
plotCol=[], linewidths=None, label=[], legendAlpha=0.0,
legendLoc='best',
pltaxis=None, maxNX=10, maxNY=10, linestyle=None,
powerLimits = [-4, 2, -4, 2], titlefsize = 12,
xylabelfsize = 12, xytickfsize = 10, labelfsize=10,
xScientific=False, yScientific=False,
yInvert=False, xInvert=False, drawGrid=True,xIsDate=False,
xTicks=None, xtickRotation=0,
markers=[], markevery=None, markerfacecolor=True,markeredgecolor=True,
zorders=None, clip_on=True,axesequal=False,
xAxisFmt=None, yAxisFmt=None,
PLcolor=None,
PLwidth=None, PLdash=None, PLyAxisSide=None, PLyAxisOverlaying=None,
PLmultipleYAxis=False, PLmultiAxisTitle=None, PLxAxisSide=None,
PLxAxisOverlaying=None, PLmultipleXAxis=False ): #Plotly initialization parameters
"""Cartesian plot on linear scales for abscissa and ordinates.
Given an existing figure, this function plots in a specified subplot position.
The function arguments are described below in some detail. Note that the y-values
or ordinates can be more than one column, each column representing a different
line in the plot. This is convenient if large arrays of data must be plotted. If more
than one column is present, the label argument can contain the legend labels for
each of the columns/lines. The pltaxis argument defines the min/max scale values
for the x and y axes.
Args:
| plotnum (int): subplot number, 1-based index
| x (np.array[N,] or [N,1]): abscissa
| y (np.array[N,] or [N,M]): ordinates - could be M columns
| ptitle (string): plot title (optional)
| xlabel (string): x-axis label (optional)
| ylabel (string): y-axis label (optional)
| plotCol ([strings]): plot colour and line style, list with M entries, use default if [] (optional)
| linewidths ([float]): plot line width in points, list with M entries, use default if None (optional)
| label ([strings]): legend label for ordinate, list with M entries (optional)
| legendAlpha (float): transparency for legend box (optional)
| legendLoc (string): location for legend box (optional)
| pltaxis ([xmin, xmax, ymin,ymax]): scale for x,y axes. Let Matplotlib decide if None. (optional)
| maxNX (int): draw maxNX+1 tick labels on x axis (optional)
| maxNY (int): draw maxNY+1 tick labels on y axis (optional)
| linestyle (string): linestyle for this plot (optional)
| powerLimits[float]: scientific tick label power limits [x-low, x-high, y-low, y-high] (optional)
| titlefsize (int): title font size, default 12pt (optional)
| xylabelfsize (int): x-axis, y-axis label font size, default 12pt (optional)
| xytickfsize (int): x-axis, y-axis tick font size, default 10pt (optional)
| labelfsize (int): label/legend font size, default 10pt (optional)
| xScientific (bool): use scientific notation on x axis (optional)
| yScientific (bool): use scientific notation on y axis (optional)
| drawGrid (bool): draw the grid on the plot (optional)
| yInvert (bool): invert the y-axis (optional)
| xInvert (bool): invert the x-axis (optional)
| xIsDate (bool): convert the datetime x-values to dates (optional)
| xTicks ({tick:label}): dict of x-axis tick locations and associated labels (optional)
| xtickRotation (float) x-axis tick label rotation angle (optional)
| markers ([string]) markers to be used for plotting data points (optional)
| markevery (int | (startind, stride)) subsample when using markers (optional)
| markerfacecolor (True|None|str) if True same as plotCol, if None empty, otherwise str is colour (optional)
| markeredgecolor (True|None|str) if True same as plotCol, if None empty, otherwise str is colour (optional)
| zorders ([int]) list of zorder for drawing sequence, highest is last (optional)
| clip_on (bool) clips objects to drawing axes (optional)
| axesequal (bool) force scaling on x and y axes to be equal (optional)
| xAxisFmt (string) x-axis format string, e.g., '%.2f', default None (optional)
| yAxisFmt (string) y-axis format string, e.g., '%.2e',, default None (optional)
| PLcolor (string): graph color scheme. Format 'rgb(r,g,b)'
| PLwidth
| PLdash (string): Line style
| PLyAxisSide (string): Sets the location of the y-axis (left/right)
| PLyAxisOverlaying (string): Sets the overlaying
| PLmultipleYAxis (bool): Indicates presence of multiple axis
| PLmultiAxisTitle (string): Sets the title of the multiple axis
| PLxAxisSide (string): Sets the location of the x-axis (top/bottom)
| PLxAxisOverlaying (string): Sets the overlaying
| PLmultipleXAxis (bool): Indicates presence of multiple axis
Returns:
| the axis object for the plot
Raises:
| No exception is raised.
"""
#Plotly variables initialization
self.PLcolor=PLcolor
self.PLwidth=PLwidth
self.PLdash=PLdash
self.PLmultipleYAxis=PLmultipleYAxis
self.PLmultiAxisTitle=PLmultiAxisTitle
self.PLyAxisSide=PLyAxisSide
self.PLyAxisOverlaying=PLyAxisOverlaying
self.PLmultipleXAxis=PLmultipleXAxis
self.PLxAxisSide=PLxAxisSide
self.PLxAxisOverlaying=PLxAxisOverlaying
## see self.MyPlot for parameter details.
pkey = (self.nrow, self.ncol, plotnum)
if pkey not in list(self.subplots.keys()):
self.subplots[pkey] = \
self.fig.add_subplot(self.nrow,self.ncol, plotnum)
ax = self.subplots[pkey]
self.myPlot(ax.plot, plotnum, x, y, ptitle, xlabel, ylabel,
plotCol, linewidths, label,legendAlpha, legendLoc,
pltaxis, maxNX, maxNY, linestyle,
powerLimits,titlefsize,
xylabelfsize, xytickfsize,
labelfsize, drawGrid,
xScientific, yScientific,
yInvert, xInvert, xIsDate,
xTicks, xtickRotation,
markers, markevery, markerfacecolor,markeredgecolor,
zorders, clip_on,axesequal,
xAxisFmt,yAxisFmt)
return ax
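# Usage sketch (comment only): y may hold several columns, giving one line per
# column, with `label` carrying one legend entry per column. The same call
# pattern applies to logLog/semilogX/semilogY below.
#
# x = np.linspace(0, 10, 101).reshape(-1, 1)
# y = np.hstack((np.sin(x), np.cos(x)))
# p = Plotter(1, 1, 1)
# p.plot(1, x, y, ptitle='demo', xlabel='x', ylabel='y',
#        label=['sin', 'cos'], legendAlpha=0.5)
# p.saveFig('demo.png')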
############################################################
##
def logLog(self, plotnum, x, y, ptitle=None, xlabel=None, ylabel=None,
plotCol=[], linewidths=None, label=[],legendAlpha=0.0,
legendLoc='best',
pltaxis=None, maxNX=10, maxNY=10, linestyle=None,
powerLimits = [-4, 2, -4, 2], titlefsize = 12,
xylabelfsize = 12, xytickfsize = 10,labelfsize=10,
xScientific=False, yScientific=False,
yInvert=False, xInvert=False, drawGrid=True,xIsDate=False,
xTicks=None, xtickRotation=0,
markers=[], markevery=None, markerfacecolor=True,markeredgecolor=True,
zorders=None, clip_on=True,axesequal=False,
xAxisFmt=None, yAxisFmt=None,
PLcolor=None,
PLwidth=None, PLdash=None, PLyAxisSide=None, PLyAxisOverlaying=None,
PLmultipleYAxis=False, PLmultiAxisTitle=None):
"""Plot data on logarithmic scales for abscissa and ordinates.
Given an existing figure, this function plots in a specified subplot position.
The function arguments are described below in some detail. Note that the y-values
or ordinates can be more than one column, each column representing a different
line in the plot. This is convenient if large arrays of data must be plotted. If more
than one column is present, the label argument can contain the legend labels for
each of the columns/lines. The pltaxis argument defines the min/max scale values
for the x and y axes.
Args:
| plotnum (int): subplot number, 1-based index
| x (np.array[N,] or [N,1]): abscissa
| y (np.array[N,] or [N,M]): ordinates - could be M columns
| ptitle (string): plot title (optional)
| xlabel (string): x-axis label (optional)
| ylabel (string): y-axis label (optional)
| plotCol ([strings]): plot colour and line style, list with M entries, use default if [] (optional)
| linewidths ([float]): plot line width in points, list with M entries, use default if None (optional)
| label ([strings]): legend label for ordinate, list with M entries (optional)
| legendAlpha (float): transparency for legend box (optional)
| legendLoc (string): location for legend box (optional)
| pltaxis ([xmin, xmax, ymin,ymax]): scale for x,y axes. Let Matplotlib decide if None. (optional)
| maxNX (int): draw maxNX+1 tick labels on x axis (optional)
| maxNY (int): draw maxNY+1 tick labels on y axis (optional)
| linestyle (string): linestyle for this plot (optional)
| powerLimits ([float]): scientific tick label power limits [x-low, x-high, y-low, y-high] (optional)
| titlefsize (int): title font size, default 12pt (optional)
| xylabelfsize (int): x-axis, y-axis label font size, default 12pt (optional)
| xytickfsize (int): x-axis, y-axis tick font size, default 10pt (optional)
| labelfsize (int): label/legend font size, default 10pt (optional)
| xScientific (bool): use scientific notation on x axis (optional)
| yScientific (bool): use scientific notation on y axis (optional)
| drawGrid (bool): draw the grid on the plot (optional)
| yInvert (bool): invert the y-axis (optional)
| xInvert (bool): invert the x-axis (optional)
| xIsDate (bool): convert the datetime x-values to dates (optional)
| xTicks ({tick:label}): dict of x-axis tick locations and associated labels (optional)
| xtickRotation (float) x-axis tick label rotation angle (optional)
| markers ([string]) markers to be used for plotting data points (optional)
| markevery (int | (startind, stride)) subsample when using markers (optional)
| markerfacecolor (True|None|str) if True same as plotCol, if None empty, otherwise str is colour (optional)
| markeredgecolor (True|None|str) if True same as plotCol, if None empty, otherwise str is colour (optional)
| zorders ([int]) list of zorder for drawing sequence, highest is last (optional)
| clip_on (bool) clips objects to drawing axes (optional)
| axesequal (bool) force scaling on x and y axes to be equal (optional)
| xAxisFmt (string) x-axis format string, e.g., '%.2f', default None (optional)
| yAxisFmt (string) y-axis format string, e.g., '%.2e',, default None (optional)
| PLcolor (string): graph color scheme. Format 'rgb(r,g,b)'
| PLwidth
| PLdash (string): Line style
| PLyAxisSide (string): Sets the location of the y-axis (left/right)
| PLyAxisOverlaying (string): Sets the overlaying
| PLmultipleYAxis (bool): Indicates presence of multiple axis
| PLmultiAxisTitle (string): Sets the title of the multiple axis
Returns:
| the axis object for the plot
Raises:
| No exception is raised.
"""
# Plotly variables initialization
self.PLcolor=PLcolor
self.PLwidth=PLwidth
self.PLdash=PLdash
self.PLmultipleYAxis=PLmultipleYAxis
self.PLmultiAxisTitle=PLmultiAxisTitle
self.PLyAxisSide=PLyAxisSide
self.PLyAxisOverlaying=PLyAxisOverlaying
## see self.MyPlot for parameter details.
pkey = (self.nrow, self.ncol, plotnum)
if pkey not in list(self.subplots.keys()):
self.subplots[pkey] = \
self.fig.add_subplot(self.nrow,self.ncol, plotnum)
ax = self.subplots[pkey]
# self.myPlot(ax.loglog, plotnum, x, y, ptitle, xlabel,ylabel,\
# plotCol, label,legendAlpha, pltaxis, \
# maxNX, maxNY, linestyle, powerLimits,titlefsize,xylabelfsize,
# xytickfsize,labelfsize, drawGrid
# xTicks, xtickRotation,
# markers=markers)
self.myPlot(ax.loglog, plotnum, x, y, ptitle, xlabel, ylabel,
plotCol, linewidths, label,legendAlpha, legendLoc,
pltaxis, maxNX, maxNY, linestyle,
powerLimits,titlefsize,
xylabelfsize, xytickfsize,
labelfsize, drawGrid,
xScientific, yScientific,
yInvert, xInvert, xIsDate,
xTicks, xtickRotation,
markers, markevery, markerfacecolor,markeredgecolor,
zorders, clip_on,axesequal,
xAxisFmt,yAxisFmt)
return ax
############################################################
##
def semilogX(self, plotnum, x, y, ptitle=None, xlabel=None, ylabel=None,
plotCol=[], linewidths=None, label=[],legendAlpha=0.0,
legendLoc='best',
pltaxis=None, maxNX=10, maxNY=10, linestyle=None,
powerLimits = [-4, 2, -4, 2], titlefsize = 12,
xylabelfsize = 12, xytickfsize = 10,labelfsize=10,
xScientific=False, yScientific=False,
yInvert=False, xInvert=False, drawGrid=True,xIsDate=False,
xTicks=None, xtickRotation=0,
markers=[], markevery=None, markerfacecolor=True,markeredgecolor=True,
zorders=None, clip_on=True, axesequal=False,
xAxisFmt=None, yAxisFmt=None,
PLcolor=None,
PLwidth=None, PLdash=None, PLyAxisSide=None, PLyAxisOverlaying=None,
PLmultipleYAxis=False, PLmultiAxisTitle=None):
"""Plot data on logarithmic scales for abscissa and linear scale for ordinates.
Given an existing figure, this function plots in a specified subplot position.
The function arguments are described below in some detail. Note that the y-values
or ordinates can be more than one column, each column representing a different
line in the plot. This is convenient if large arrays of data must be plotted. If more
than one column is present, the label argument can contain the legend labels for
each of the columns/lines. The pltaxis argument defines the min/max scale values
for the x and y axes.
Args:
| plotnum (int): subplot number, 1-based index
| x (np.array[N,] or [N,1]): abscissa
| y (np.array[N,] or [N,M]): ordinates - could be M columns
| ptitle (string): plot title (optional)
| xlabel (string): x-axis label (optional)
| ylabel (string): y-axis label (optional)
| plotCol ([strings]): plot colour and line style, list with M entries, use default if [] (optional)
| linewidths ([float]): plot line width in points, list with M entries, use default if None (optional)
| label ([strings]): legend label for ordinate, list with M entries (optional)
| legendAlpha (float): transparency for legend box (optional)
| legendLoc (string): location for legend box (optional)
| pltaxis ([xmin, xmax, ymin,ymax]): scale for x,y axes. Let Matplotlib decide if None. (optional)
| maxNX (int): draw maxNX+1 tick labels on x axis (optional)
| maxNY (int): draw maxNY+1 tick labels on y axis (optional)
| linestyle (string): linestyle for this plot (optional)
| powerLimits[float]: scientific tick label notation power limits [x-low, x-high, y-low, y-high] (optional)
| titlefsize (int): title font size, default 12pt (optional)
| xylabelfsize (int): x-axis, y-axis label font size, default 12pt (optional)
| xytickfsize (int): x-axis, y-axis tick font size, default 10pt (optional)
| labelfsize (int): label/legend font size, default 10pt (optional)
| xScientific (bool): use scientific notation on x axis (optional)
| yScientific (bool): use scientific notation on y axis (optional)
| drawGrid (bool): draw the grid on the plot (optional)
| yInvert (bool): invert the y-axis (optional)
| xInvert (bool): invert the x-axis (optional)
| xIsDate (bool): convert the datetime x-values to dates (optional)
| xTicks ({tick:label}): dict of x-axis tick locations and associated labels (optional)
| xtickRotation (float) x-axis tick label rotation angle (optional)
| markers ([string]) markers to be used for plotting data points (optional)
| markevery (int | (startind, stride)) subsample when using markers (optional)
| markerfacecolor (True|None|str) if True same as plotCol, if None empty, otherwise str is colour (optional)
| markeredgecolor (True|None|str) if True same as plotCol, if None empty, otherwise str is colour (optional)
| zorders ([int]) list of zorder for drawing sequence, highest is last (optional)
| clip_on (bool) clips objects to drawing axes (optional)
| axesequal (bool) force scaling on x and y axes to be equal (optional)
| xAxisFmt (string) x-axis format string, e.g., '%.2f', default None (optional)
| yAxisFmt (string) y-axis format string, e.g., '%.2e', default None (optional)
| PLcolor (string): graph color scheme. Format 'rgb(r,g,b)'
| PLwidth (float): plot line width
| PLdash (string): Line style
| PLyAxisSide (string): Sets the location of the y-axis (left/right)
| PLyAxisOverlaying (string): Sets the y-axis overlaying behaviour
| PLmultipleYAxis (bool): Indicates the presence of multiple y-axes
| PLmultiAxisTitle (string): Sets the title of the additional y-axis
Returns:
| the axis object for the plot
Raises:
| No exception is raised.
"""
#Plotly variables initialization
self.PLcolor=PLcolor
self.PLwidth=PLwidth
self.PLdash=PLdash
self.PLmultipleYAxis=PLmultipleYAxis
self.PLmultiAxisTitle=PLmultiAxisTitle
self.PLyAxisSide=PLyAxisSide
self.PLyAxisOverlaying=PLyAxisOverlaying
## see self.MyPlot for parameter details.
pkey = (self.nrow, self.ncol, plotnum)
if pkey not in list(self.subplots.keys()):
self.subplots[pkey] = \
self.fig.add_subplot(self.nrow,self.ncol, plotnum)
ax = self.subplots[pkey]
self.myPlot(ax.semilogx, plotnum, x, y, ptitle, xlabel, ylabel,\
plotCol, linewidths, label,legendAlpha, legendLoc,
pltaxis, maxNX, maxNY, linestyle,
powerLimits, titlefsize,
xylabelfsize, xytickfsize,
labelfsize, drawGrid,
xScientific, yScientific,
yInvert, xInvert, xIsDate,
xTicks, xtickRotation,
markers, markevery, markerfacecolor,markeredgecolor,
zorders, clip_on,axesequal,
xAxisFmt,yAxisFmt)
return ax
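# Minimal, commented usage sketch for semilogX above. Assumptions: `p` is an
# instance of the enclosing plotting class with a 1x1 subplot grid (constructor
# not shown in this section); array shapes are illustrative only.
#
#   import numpy as np
#   x = np.logspace(-1, 2, 200)                    # frequency-like abscissa
#   y = 1.0 / (1.0 + (x / 10.0)**2)                # single ordinate column
#   ax = p.semilogX(1, x, y, ptitle='Roll-off', xlabel='frequency',
#                   ylabel='response', label=['filter'])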
############################################################
##
def semilogY(self, plotnum, x, y, ptitle=None, xlabel=None, ylabel=None,
plotCol=[], linewidths=None, label=[],legendAlpha=0.0,
legendLoc='best',
pltaxis=None, maxNX=10, maxNY=10, linestyle=None,
powerLimits = [-4, 2, -4, 2], titlefsize = 12,
xylabelfsize = 12, xytickfsize = 10, labelfsize=10,
xScientific=False, yScientific=False,
yInvert=False, xInvert=False, drawGrid=True,xIsDate=False,
xTicks=None, xtickRotation=0,
markers=[], markevery=None, markerfacecolor=True,markeredgecolor=True,
zorders=None, clip_on=True,axesequal=False,
xAxisFmt=None, yAxisFmt=None,
PLcolor=None,
PLwidth=None, PLdash=None, PLyAxisSide=None, PLyAxisOverlaying=None,
PLmultipleYAxis=False, PLmultiAxisTitle=None):
"""Plot data on linear scales for abscissa and logarithmic scale for ordinates.
Given an existing figure, this function plots in a specified subplot position.
The function arguments are described below in some detail. Note that the y-values
or ordinates can be more than one column, each column representing a different
line in the plot. This is convenient if large arrays of data must be plotted. If more
than one column is present, the label argument can contain the legend labels for
each of the columns/lines. The pltaxis argument defines the min/max scale values
for the x and y axes.
Args:
| plotnum (int): subplot number, 1-based index
| x (np.array[N,] or [N,1]): abscissa
| y (np.array[N,] or [N,M]): ordinates - could be M columns
| ptitle (string): plot title (optional)
| xlabel (string): x-axis label (optional)
| ylabel (string): y-axis label (optional)
| plotCol ([strings]): plot colour and line style, list with M entries, use default if [] (optional)
| linewidths ([float]): plot line width in points, list with M entries, use default if None (optional)
| label ([strings]): legend label for ordinate, list with M entries (optional)
| legendAlpha (float): transparency for legend box (optional)
| legendLoc (string): location for legend box (optional)
| pltaxis ([xmin, xmax, ymin,ymax]): scale for x,y axes. Let Matplotlib decide if None. (optional)
| maxNX (int): draw maxNX+1 tick labels on x axis (optional)
| maxNY (int): draw maxNY+1 tick labels on y axis (optional)
| linestyle (string): linestyle for this plot (optional)
| powerLimits[float]: scientific tick label power limits [x-low, x-high, y-low, y-high] (optional)
| titlefsize (int): title font size, default 12pt (optional)
| xylabelfsize (int): x-axis, y-axis label font size, default 12pt (optional)
| xytickfsize (int): x-axis, y-axis tick font size, default 10pt (optional)
| labelfsize (int): label/legend font size, default 10pt (optional)
| xScientific (bool): use scientific notation on x axis (optional)
| yScientific (bool): use scientific notation on y axis (optional)
| drawGrid (bool): draw the grid on the plot (optional)
| yInvert (bool): invert the y-axis (optional)
| xInvert (bool): invert the x-axis (optional)
| xIsDate (bool): convert the datetime x-values to dates (optional)
| xTicks ({tick:label}): dict of x-axis tick locations and associated labels (optional)
| xtickRotation (float) x-axis tick label rotation angle (optional)
| markers ([string]) markers to be used for plotting data points (optional)
| markevery (int | (startind, stride)) subsample when using markers (optional)
| markerfacecolor (True|None|str) if True same as plotCol, if None empty, otherwise str is colour (optional)
| markeredgecolor (True|None|str) if True same as plotCol, if None empty, otherwise str is colour (optional)
| zorders ([int]) list of zorder for drawing sequence, highest is last (optional)
| clip_on (bool) clips objects to drawing axes (optional)
| axesequal (bool) force scaling on x and y axes to be equal (optional)
| xAxisFmt (string) x-axis format string, e.g., '%.2f', default None (optional)
| yAxisFmt (string) y-axis format string, e.g., '%.2e', default None (optional)
| PLcolor (string): graph color scheme. Format 'rgb(r,g,b)'
| PLwidth (float): plot line width
| PLdash (string): Line style
| PLyAxisSide (string): Sets the location of the y-axis (left/right)
| PLyAxisOverlaying (string): Sets the y-axis overlaying behaviour
| PLmultipleYAxis (bool): Indicates the presence of multiple y-axes
| PLmultiAxisTitle (string): Sets the title of the additional y-axis
Returns:
| the axis object for the plot
Raises:
| No exception is raised.
"""
#Plotly variables initialization
self.PLcolor=PLcolor
self.PLwidth=PLwidth
self.PLdash=PLdash
self.PLmultipleYAxis=PLmultipleYAxis
self.PLmultiAxisTitle=PLmultiAxisTitle
self.PLyAxisSide=PLyAxisSide
self.PLyAxisOverlaying=PLyAxisOverlaying
## see self.MyPlot for parameter details.
pkey = (self.nrow, self.ncol, plotnum)
if pkey not in list(self.subplots.keys()):
self.subplots[pkey] = \
self.fig.add_subplot(self.nrow,self.ncol, plotnum)
ax = self.subplots[pkey]
self.myPlot(ax.semilogy, plotnum, x, y, ptitle,xlabel,ylabel,
plotCol, linewidths, label,legendAlpha, legendLoc,
pltaxis, maxNX, maxNY, linestyle,
powerLimits, titlefsize,
xylabelfsize, xytickfsize,
labelfsize, drawGrid,
xScientific, yScientific,
yInvert, xInvert, xIsDate,
xTicks, xtickRotation,
markers, markevery, markerfacecolor,markeredgecolor,
zorders, clip_on,
axesequal,xAxisFmt,yAxisFmt)
return ax
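# Minimal, commented usage sketch for semilogY above, under the same assumptions
# as the earlier sketches (`p` is an instance of this class; arrays illustrative).
#
#   import numpy as np
#   x = np.linspace(0, 50, 200)
#   y = np.vstack((np.exp(-x / 5.0), np.exp(-x / 10.0))).T   # two decay curves
#   ax = p.semilogY(1, x, y, ptitle='Exponential decay', xlabel='time',
#                   ylabel='amplitude', label=['fast', 'slow'])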
############################################################
##
def stackplot(self, plotnum, x, y, ptitle=None, xlabel=None, ylabel=None,
plotCol=[], linewidths=None, label=[],legendAlpha=0.0,
legendLoc='best',
pltaxis=None, maxNX=10, maxNY=10, linestyle=None,
powerLimits = [-4, 2, -4, 2], titlefsize = 12,
xylabelfsize = 12, xytickfsize = 10, labelfsize=10,
xScientific=False, yScientific=False,
yInvert=False, xInvert=False, drawGrid=True,xIsDate=False,
xTicks=None, xtickRotation=0,
markers=[], markevery=None, markerfacecolor=True,markeredgecolor=True,
zorders=None, clip_on=True, axesequal=False,
xAxisFmt=None, yAxisFmt=None,
PLcolor=None,
PLwidth=None, PLdash=None, PLyAxisSide=None, PLyAxisOverlaying=None,
PLmultipleYAxis=False, PLmultiAxisTitle=None):
"""Plot stacked data on linear scales for abscissa and ordinates.
Given an existing figure, this function plots in a specified subplot position.
The function arguments are described below in some detail. Note that the y-values
or ordinates can be more than one column, each column representing a different
line in the plot. If more
than one column is present, the label argument can contain the legend labels for
each of the columns/lines. The pltaxis argument defines the min/max scale values
for the x and y axes.
Args:
| plotnum (int): subplot number, 1-based index
| x (np.array[N,] or [N,1]): abscissa
| y (np.array[N,] or [N,M]): ordinates - could be M columns
| ptitle (string): plot title (optional)
| xlabel (string): x-axis label (optional)
| ylabel (string): y-axis label (optional)
| plotCol ([strings]): plot colour and line style, list with M entries, use default if [] (optional)
| linewidths ([float]): plot line width in points, list with M entries, use default if None (optional)
| label ([strings]): legend label for ordinate, list with M entries (optional)
| legendAlpha (float): transparency for legend box (optional)
| legendLoc (string): location for legend box (optional)
| pltaxis ([xmin, xmax, ymin,ymax]): scale for x,y axes. Let Matplotlib decide if None. (optional)
| maxNX (int): draw maxNX+1 tick labels on x axis (optional)
| maxNY (int): draw maxNY+1 tick labels on y axis (optional)
| linestyle (string): linestyle for this plot (optional)
| powerLimits[float]: scientific tick label power limits [x-low, x-high, y-low, y-high] (optional)
| titlefsize (int): title font size, default 12pt (optional)
| xylabelfsize (int): x-axis, y-axis label font size, default 12pt (optional)
| xytickfsize (int): x-axis, y-axis tick font size, default 10pt (optional)
| labelfsize (int): label/legend font size, default 10pt (optional)
| xScientific (bool): use scientific notation on x axis (optional)
| yScientific (bool): use scientific notation on y axis (optional)
| drawGrid (bool): draw the grid on the plot (optional)
| yInvert (bool): invert the y-axis (optional)
| xInvert (bool): invert the x-axis (optional)
| xIsDate (bool): convert the datetime x-values to dates (optional)
| xTicks ({tick:label}): dict of x-axis tick locations and associated labels (optional)
| xtickRotation (float) x-axis tick label rotation angle (optional)
| markers ([string]) markers to be used for plotting data points (optional)
| markevery (int | (startind, stride)) subsample when using markers (optional)
| markerfacecolor (True|None|str) if True same as plotCol, if None empty, otherwise str is colour (optional)
| markeredgecolor (True|None|str) if True same as plotCol, if None empty, otherwise str is colour (optional)
| zorders ([int]) list of zorder for drawing sequence, highest is last (optional)
| clip_on (bool) clips objects to drawing axes (optional)
| axesequal (bool) force scaling on x and y axes to be equal (optional)
| xAxisFmt (string) x-axis format string, e.g., '%.2f', default None (optional)
| yAxisFmt (string) y-axis format string, e.g., '%.2e', default None (optional)
| PLcolor (string): graph color scheme. Format 'rgb(r,g,b)'
| PLwidth (float): plot line width
| PLdash (string): Line style
| PLyAxisSide (string): Sets the location of the y-axis (left/right)
| PLyAxisOverlaying (string): Sets the y-axis overlaying behaviour
| PLmultipleYAxis (bool): Indicates the presence of multiple y-axes
| PLmultiAxisTitle (string): Sets the title of the additional y-axis
Returns:
| the axis object for the plot
Raises:
| No exception is raised.
"""
#Plotly variables initialization
self.PLcolor=PLcolor
self.PLwidth=PLwidth
self.PLdash=PLdash
self.PLmultipleYAxis=PLmultipleYAxis
self.PLmultiAxisTitle=PLmultiAxisTitle
self.PLyAxisSide=PLyAxisSide
self.PLyAxisOverlaying=PLyAxisOverlaying
## see self.MyPlot for parameter details.
pkey = (self.nrow, self.ncol, plotnum)
if pkey not in list(self.subplots.keys()):
self.subplots[pkey] = \
self.fig.add_subplot(self.nrow,self.ncol, plotnum)
ax = self.subplots[pkey]
self.myPlot(ax.stackplot, plotnum, x, y.T, ptitle,xlabel,ylabel,
plotCol, linewidths, label,legendAlpha, legendLoc,
pltaxis, maxNX, maxNY, linestyle,
powerLimits, titlefsize,
xylabelfsize, xytickfsize,
labelfsize, drawGrid,
xScientific, yScientific,
yInvert, xInvert, xIsDate,
xTicks, xtickRotation,
markers, markevery, markerfacecolor,markeredgecolor,
zorders, clip_on,
axesequal,xAxisFmt,yAxisFmt)
return ax
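# Minimal, commented usage sketch for stackplot above. Assumptions as before:
# `p` is an instance of this class; the y array has one column per stacked band.
#
#   import numpy as np
#   x = np.linspace(0, 10, 100)
#   y = np.vstack((np.ones_like(x), 0.5 * x, np.sqrt(x))).T   # three bands [N,3]
#   ax = p.stackplot(1, x, y, ptitle='Stacked bands', xlabel='x', ylabel='total',
#                    label=['base', 'linear', 'sqrt'])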
############################################################
##
def myPlot(self, plotcommand,plotnum, x, y, ptitle=None,xlabel=None, ylabel=None,
plotCol=[], linewidths=None, label=[], legendAlpha=0.0,
legendLoc='best',
pltaxis=None, maxNX=0, maxNY=0, linestyle=None,
powerLimits = [-4, 2, -4, 2], titlefsize = 12,
xylabelfsize = 12, xytickfsize = 10,
labelfsize=10, drawGrid=True,
xScientific=False, yScientific=False,
yInvert=False, xInvert=False, xIsDate=False,
xTicks=None, xtickRotation=0,
markers=[], markevery=None,
markerfacecolor=True,markeredgecolor=True,
zorders=None,clip_on=True,axesequal=False,
xAxisFmt=None,yAxisFmt=None,
PLyStatic=[0]
):
"""Low level helper function to create a subplot and plot the data as required.
This function does the actual plotting, labelling etc. It uses the plotting
function provided by its user functions.
lineStyles = {
'': '_draw_nothing',
' ': '_draw_nothing',
'None': '_draw_nothing',
'--': '_draw_dashed',
'-.': '_draw_dash_dot',
'-': '_draw_solid',
':': '_draw_dotted'}
Args:
| plotcommand: name of a MatplotLib plotting function
| plotnum (int): subplot number, 1-based index
| ptitle (string): plot title
| xlabel (string): x axis label
| ylabel (string): y axis label
| x (np.array[N,] or [N,1]): abscissa
| y (np.array[N,] or [N,M]): ordinates - could be M columns
| plotCol ([strings]): plot colour and line style, list with M entries, use default if []
| linewidths ([float]): plot line width in points, list with M entries, use default if None (optional)
| label ([strings]): legend label for ordinate, list with M entries
| legendAlpha (float): transparency for legend box
| legendLoc (string): location for legend box (optional)
| pltaxis ([xmin, xmax, ymin,ymax]): scale for x,y axes. Let Matplotlib decide if None.
| maxNX (int): draw maxNX+1 tick labels on x axis
| maxNY (int): draw maxNY+1 tick labels on y axis
| linestyle (string): linestyle for this plot (optional)
| powerLimits[float]: scientific tick label power limits [x-low, x-high, y-low, y-high] (optional)
| titlefsize (int): title font size, default 12pt (optional)
| xylabelfsize (int): x-axis, y-axis label font size, default 12pt (optional)
| xytickfsize (int): x-axis, y-axis tick font size, default 10pt (optional)
| labelfsize (int): label/legend font size, default 10pt (optional)
| drawGrid (bool): draw the grid on the plot (optional)
| xScientific (bool): use scientific notation on x axis (optional)
| yScientific (bool): use scientific notation on y axis (optional)
| yInvert (bool): invert the y-axis (optional)
| xInvert (bool): invert the x-axis (optional)
| xIsDate (bool): convert the datetime x-values to dates (optional)
| xTicks ({tick:label}): dict of x-axis tick locations and associated labels (optional)
| xtickRotation (float) x-axis tick label rotation angle (optional)
| markers ([string]) markers to be used for plotting data points (optional)
| markevery (int | (startind, stride)) subsample when using markers (optional)
| markerfacecolor (True|None|str) if True same as plotCol, if None empty, otherwise str is colour (optional)
| markeredgecolor (True|None|str) if True same as plotCol, if None empty, otherwise str is colour (optional)
| zorders ([int]) list of zorder for drawing sequence, highest is last (optional)
| clip_on (bool) clips objects to drawing axes (optional)
| axesequal (bool) force scaling on x and y axes to be equal (optional)
| xAxisFmt (string) x-axis format string, e.g., '%.2f', default None (optional)
| yAxisFmt (string) y-axis format string, e.g., '%.2e', default None (optional)
| PLyStatic ([np.array]) running baseline offset, used internally to accumulate the stack height for Plotly stacked plots (optional)
Returns:
| the axis object for the plot
Raises:
| No exception is raised.
"""
#Initialize plotlyPlot call when Plotly is activated
if self.useplotly:
self.PlotlyPlotCalls = self.PlotlyPlotCalls + 1
if x.ndim>1:
xx=x
else:
if isinstance(x, pd.Series):
x = x.values
xx=x.reshape(-1, 1)
if y.ndim>1:
yy=y
else:
if isinstance(y, pd.Series):
y = y.values
yy=y.reshape(-1, 1)
# plotCol = self.buildPlotCol(plotCol, yy.shape[1])
pkey = (self.nrow, self.ncol, plotnum)
ax = self.subplots[pkey]
if drawGrid:
ax.grid(True)
else:
ax.grid(False)
# use scientific format on axes
#yfm = sbp.yaxis.get_major_formatter()
#yfm.set_powerlimits([ -3, 3])
if xlabel is not None:
ax.set_xlabel(xlabel, fontsize=xylabelfsize)
if ylabel is not None:
ax.set_ylabel(ylabel, fontsize=xylabelfsize)
if xIsDate:
ax.xaxis_date()
ax.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
ax.xaxis.set_major_locator(mdates.DayLocator())
if maxNX >0:
ax.xaxis.set_major_locator(mpl.ticker.MaxNLocator(maxNX))
if maxNY >0:
ax.yaxis.set_major_locator(mpl.ticker.MaxNLocator(maxNY))
if xScientific:
# formx = plt.FormatStrFormatter('%.3e')
formx = plt.ScalarFormatter()
formx.set_powerlimits([powerLimits[0], powerLimits[1]])
formx.set_scientific(True)
ax.xaxis.set_major_formatter(formx)
# http://matplotlib.1069221.n5.nabble.com/ScalarFormatter-td28042.html
# http://matplotlib.org/api/ticker_api.html
# http://matplotlib.org/examples/pylab_examples/newscalarformatter_demo.html
# ax.xaxis.set_major_formatter( plt.FormatStrFormatter('%d'))
# http://matplotlib.org/1.3.1/api/axes_api.html#matplotlib.axes.Axes.ticklabel_format
# plt.ticklabel_format(style='sci', axis='x',
# scilimits=(powerLimits[0], powerLimits[1]))
if yScientific:
formy = plt.ScalarFormatter()
formy.set_powerlimits([powerLimits[2], powerLimits[3]])
formy.set_scientific(True)
ax.yaxis.set_major_formatter(formy)
# this user-defined format setting is given at the end of the function.
# # override the format with user defined
# if xAxisFmt is not None:
# ax.xaxis.set_major_formatter(FormatStrFormatter(xAxisFmt))
# if yAxisFmt is not None:
# ax.yaxis.set_major_formatter(FormatStrFormatter(yAxisFmt))
###############################stacked plot #######################
if plotcommand==ax.stackplot:
if not self.useplotly:
if not plotCol:
plotCol = [self.nextPlotCol() for col in range(0,yy.shape[0])]
ax.stackplot(xx.reshape(-1), yy, colors=plotCol)
ax.margins(0, 0) # Set margins to avoid "whitespace"
# creating the legend manually
ax.legend([mpl.patches.Patch(color=col) for col in plotCol], label,
loc=legendLoc, framealpha=legendAlpha)
else: #Plotly stacked plot
#Plotly stacked plot variables
PLXAxis = 0
PLYAxis = 0
for i in range(yy.shape[0]):
PLXAxis = dict(type='category',)
PLYAxis = dict(type='linear')
try:
if len(y[0,:]) > 1:
self.Plotlydata.append(Scatter(x=x, y=y[i,:]+PLyStatic[0],mode='lines', name = label,fill='tonexty',line = dict(color = self.PLcolor, width = self.PLwidth, dash = self.PLdash)))
PLyStatic[0] += y[i,:]
elif len(x[0,:]) > 1:
self.Plotlydata.append(Scatter(x=x[:,i], y=y,mode='lines', name = label,fill='tonexty',line = dict(color = self.PLcolor, width = self.PLwidth, dash = self.PLdash)))
except:
self.Plotlydata.append(Scatter(x=x, y=y, name = label,fill='tonexty',line = dict(color = self.PLcolor, width = self.PLwidth, dash = self.PLdash)))
###############################line plot #######################
else: # not a stacked plot
for i in range(yy.shape[1]):
#set up the line style, either given or next in sequence
mmrk = ''
if markers:
if i >= len(markers):
mmrk = markers[-1]
else:
mmrk = markers[i]
if plotCol:
if i >= len(plotCol):
col = plotCol[-1]
else:
col = plotCol[i]
else:
col = self.nextPlotCol()
if markerfacecolor==True:
markerfacecolor = col
elif markerfacecolor is None:
markerfacecolor='none'
else:
pass # keep as is
if markeredgecolor==True:
markeredgecolor = col
elif markeredgecolor is None:
markeredgecolor='none'
else:
pass # keep as is
if linestyle is None:
linestyleL = '-'
else:
if type(linestyle) == type([1]):
linestyleL = linestyle[i]
else:
linestyleL = linestyle
if zorders:
if len(zorders) > 1:
zorder = zorders[i]
else:
zorder = zorders[0]
else:
zorder = 2
if not self.useplotly:
if not label:
if linewidths is not None:
plotcommand(xx, yy[:, i], col, label=None, linestyle=linestyleL,
markerfacecolor=markerfacecolor,markeredgecolor=markeredgecolor,
marker=mmrk, markevery=markevery, linewidth=linewidths[i],
clip_on=clip_on, zorder=zorder)
else:
plotcommand(xx, yy[:, i], col, label=None, linestyle=linestyleL,
markerfacecolor=markerfacecolor,markeredgecolor=markeredgecolor,
marker=mmrk, markevery=markevery,
clip_on=clip_on, zorder=zorder)
else:
if linewidths is not None:
# print('***************',linewidths)
line, = plotcommand(xx,yy[:,i],col,#label=label[i],
linestyle=linestyleL,
markerfacecolor=markerfacecolor,markeredgecolor=markeredgecolor,
marker=mmrk, markevery=markevery, linewidth=linewidths[i],
clip_on=clip_on, zorder=zorder)
else:
line, = plotcommand(xx,yy[:,i],col,#label=label[i],
linestyle=linestyleL,
markerfacecolor=markerfacecolor,markeredgecolor=markeredgecolor,
marker=mmrk, markevery=markevery,
clip_on=clip_on, zorder=zorder)
line.set_label(label[i])
leg = ax.legend( loc=legendLoc, fancybox=True,fontsize=labelfsize)
leg.get_frame().set_alpha(legendAlpha)
# ax.legend()
self.bbox_extra_artists.append(leg)
else:#Plotly plots
if 'loglog' in str(plotcommand):
PLXAxis = dict(type='log',showgrid=drawGrid,zeroline=False,nticks=20,showline=True,title=xlabel,mirror='all')
PLYAxis = dict(type='log',showgrid=drawGrid,zeroline=False,nticks=20,showline=True,title=ylabel,mirror='all')
# Assumes that either x or y has only one column
try:
if len(x[0,:]) > 1:
self.Plotlydata.append(Scatter(x=x[:,i], y=y, name = label,line = dict(color = self.PLcolor, width = self.PLwidth, dash = self.PLdash)))
elif len(y[0,:]) > 1:
self.Plotlydata.append(Scatter(x=x[:,0], y=y[:,i], name = label,line = dict(color = self.PLcolor, width = self.PLwidth, dash = self.PLdash)))
except:
self.Plotlydata.append(Scatter(x=x, y=y, name = label,line = dict(color = self.PLcolor, width = self.PLwidth, dash = self.PLdash)))
# Append axis and plot titles
if self.ncol > 1:
self.PlotlySubPlotNumbers.append(plotnum)
self.PlotlyXaxisTitles.append(xlabel)
self.PlotlyYaxisTitles.append(ylabel)
elif self.nrow > 1 :
self.PlotlySubPlotNumbers.append(plotnum)
self.PlotlyXaxisTitles.append(xlabel)
self.PlotlyYaxisTitles.append(ylabel)
elif 'semilogx' in str(plotcommand):
PLXAxis = dict(type='log',showgrid=drawGrid,zeroline=False,nticks=20,showline=True,title=xlabel,mirror='all')
PLYAxis = dict(showgrid=drawGrid,zeroline=False,nticks=20,showline=True,title=ylabel,mirror='all')
# Assumes that either x or y has only one column
try:
if len(x[0,:]) > 1:
self.Plotlydata.append(Scatter(x=x[:,i], y=y, name = label,line = dict(color = self.PLcolor, width = self.PLwidth, dash = self.PLdash)))
elif len(y[0,:]) > 1:
self.Plotlydata.append(Scatter(x=x[:,0], y=y[:,i], name = label,line = dict(color = self.PLcolor, width = self.PLwidth, dash = self.PLdash)))
except:
self.Plotlydata.append(Scatter(x=x, y=y, name = label,line = dict(color = self.PLcolor, width = self.PLwidth, dash = self.PLdash)))
# Append axis and plot titles
if self.ncol > 1:
self.PlotlySubPlotNumbers.append(plotnum)
self.PlotlyXaxisTitles.append(xlabel)
self.PlotlyYaxisTitles.append(ylabel)
elif self.nrow > 1 :
self.PlotlySubPlotNumbers.append(plotnum)
self.PlotlyXaxisTitles.append(xlabel)
self.PlotlyYaxisTitles.append(ylabel)
elif 'semilogy' in str(plotcommand):
PLXAxis = dict(showgrid=drawGrid,zeroline=False,nticks=20,showline=True,title=xlabel,mirror='all')
PLYAxis = dict(type='log',showgrid=drawGrid,zeroline=False,nticks=20,showline=True,title=ylabel,mirror='all')
# Assumes that either x or y has only one column
try:
if len(x[0,:]) > 1:
self.Plotlydata.append(Scatter(x=x[:,i], y=y, name = label,line = dict(color = self.PLcolor, width = self.PLwidth, dash = self.PLdash)))
elif len(y[0,:]) > 1:
self.Plotlydata.append(Scatter(x=x[:,0], y=y[:,i], name = label,line = dict(color = self.PLcolor, width = self.PLwidth, dash = self.PLdash)))
except:
self.Plotlydata.append(Scatter(x=x, y=y, name = label,line = dict(color = self.PLcolor, width = self.PLwidth, dash = self.PLdash)))
# Append axis and plot titles
if self.ncol > 1:
self.PlotlySubPlotNumbers.append(plotnum)
self.PlotlyXaxisTitles.append(xlabel)
self.PlotlyYaxisTitles.append(ylabel)
elif self.nrow > 1 :
self.PlotlySubPlotNumbers.append(plotnum)
self.PlotlyXaxisTitles.append(xlabel)
self.PlotlyYaxisTitles.append(ylabel)
else:
PLXAxis = dict(showgrid=drawGrid,zeroline=False,nticks=20,showline=True,title=xlabel,mirror='all')
PLYAxis = dict(showgrid=drawGrid,zeroline=False,nticks=20,showline=True,title=ylabel,mirror='all')
# Assumes that either x or y has only one column
try:
if len(x[0,:]) > 1:
self.Plotlydata.append(Scatter(x=x[:,i], y=y, name = label,xaxis='x1',
line = dict(color = self.PLcolor, width = self.PLwidth, dash = self.PLdash)))
elif len(y[0,:]) > 1:
self.Plotlydata.append(Scatter(x=x[:,0], y=y[:,i], name = label,xaxis='x1',
line = dict(color = self.PLcolor, width = self.PLwidth, dash = self.PLdash)))
except:
self.Plotlydata.append(Scatter(x=x, y=y, name = label,xaxis='x1',
line = dict(color = self.PLcolor, width = self.PLwidth, dash = self.PLdash)))
# Append axis and plot titles
if self.ncol > 1:
self.PlotlySubPlotNumbers.append(plotnum)
self.PlotlyXaxisTitles.append(xlabel)
self.PlotlyYaxisTitles.append(ylabel)
elif self.nrow > 1 :
self.PlotlySubPlotNumbers.append(plotnum)
self.PlotlyXaxisTitles.append(xlabel)
self.PlotlyYaxisTitles.append(ylabel)
#Plotly plots setup
if self.useplotly:
if self.PLmultipleYAxis:
self.Plotlylayout.append(Layout(showlegend = True,title = ptitle,xaxis = PLXAxis,yaxis=PLYAxis,yaxis2=dict(title=self.PLmultiAxisTitle,side=self.PLyAxisSide,overlaying=self.PLyAxisOverlaying)))
elif self.PLmultipleXAxis:
self.Plotlylayout.append(Layout(showlegend = True,title = ptitle,yaxis=PLYAxis,xaxis = PLXAxis,xaxis2=dict(title=self.PLmultiAxisTitle,side=self.PLxAxisSide,overlaying=self.PLxAxisOverlaying)))
else:
self.Plotlylayout.append(Layout(showlegend = True,title = ptitle,xaxis = PLXAxis,yaxis=PLYAxis))
if self.ncol > 1:
self.PlotlySubPlotTitles.append(ptitle)
self.PlotlySubPlotLabels.append(label)
elif self.nrow > 1:
self.PlotlySubPlotTitles.append(ptitle)
self.PlotlySubPlotLabels.append(label)
if xIsDate:
plt.gcf().autofmt_xdate()
#scale the axes
if pltaxis is not None:
# ax.axis(pltaxis)
if not xIsDate:
ax.set_xlim(pltaxis[0],pltaxis[1])
ax.set_ylim(pltaxis[2],pltaxis[3])
if xTicks is not None:
ticks = ax.set_xticks(list(xTicks.keys()))
ax.set_xticklabels([xTicks[key] for key in xTicks],
rotation=xtickRotation, fontsize=xytickfsize)
if xTicks is None and xtickRotation is not None:
ticks = ax.get_xticks()
if xIsDate:
from datetime import date
ticks = [date.fromordinal(int(tick)).strftime('%Y-%m-%d') for tick in ticks]
ax.set_xticks(ticks) # this is workaround for bug in matplotlib
ax.set_xticklabels(ticks,
rotation=xtickRotation, fontsize=xytickfsize)
if(ptitle is not None):
ax.set_title(ptitle, fontsize=titlefsize)
# minor ticks are two points smaller than major
ax.tick_params(axis='both', which='major', labelsize=xytickfsize)
ax.tick_params(axis='both', which='minor', labelsize=xytickfsize-2)
if yInvert:
ax.set_ylim(ax.get_ylim()[::-1])
if xInvert:
ax.set_xlim(ax.get_xlim()[::-1])
if axesequal:
ax.axis('equal')
# override the format with user defined
if xAxisFmt is not None:
ax.xaxis.set_major_formatter(FormatStrFormatter(xAxisFmt))
if yAxisFmt is not None:
ax.yaxis.set_major_formatter(FormatStrFormatter(yAxisFmt))
return ax
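# myPlot is the low-level worker shared by the public wrappers above and is not
# normally called directly. The commented sketch below shows (via semilogY, as an
# assumed-typical route) how its per-column styling arguments are passed through:
# one linestyle, marker and zorder entry per ordinate column.
#
#   import numpy as np
#   x = np.linspace(0, 50, 200)
#   y = np.vstack((np.exp(-x / 5.0), np.exp(-x / 10.0))).T
#   p.semilogY(1, x, y, label=['fast', 'slow'],
#              linestyle=['-', '--'],          # list: one line style per column
#              markers=['o', 's'], markevery=10,
#              zorders=[3, 2])                 # higher zorder is drawn last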
############################################################
#Before this function is called, plot data is accumulated in runtime variables
#At the call of this function the Plotly plots are plotted using the accumulated data.
def plotlyPlot(self,filename=None,image=None,image_filename=None,auto_open=True):
if self.nrow == 1 and self.ncol == 1: # no subplots
fig = Figure(data=self.Plotlydata,layout=self.Plotlylayout[0])
fig['layout'].update(title=str(self.figuretitle))
else:
dataFormatCatch = 0
try:
len(self.Plotlydata[0].y[1,:])
dataFormatCatch = 0
except:
dataFormatCatch = 1
if self.PLIs3D:
specRow = []
specCol = []
for r in range(int(self.nrow)):
specRow.append({'is_3d': True})
for r in range(int(self.ncol)):
specCol.append({'is_3d': True})
fig = tools.make_subplots(rows=int(self.nrow), cols=int(self.ncol), specs=[specRow,specCol])#[[{'is_3d': True}, {'is_3d': True}], [{'is_3d': True}, {'is_3d': True}]])
else:
fig = tools.make_subplots(int(self.nrow), int(self.ncol), subplot_titles=self.PlotlySubPlotTitles)
# make row and column formats
rowFormat = []
colFormat = []
countRows = 1
rowCount = 1
colCount = 1
for tmp in range(int(self.nrow)*int(self.ncol)):
if int(self.nrow) == int(self.ncol):
if countRows == int(self.nrow):
rowFormat.append(rowCount)
rowCount = rowCount + 1
if rowCount > int(self.nrow):
rowCount = 1
countRows = 1
elif countRows < int(self.nrow) :
rowFormat.append(rowCount)
countRows = countRows + 1
if colCount == int(self.ncol):
colFormat.append(colCount)
colCount = 1
elif colCount < int(self.ncol):
colFormat.append(colCount)
colCount = colCount + 1
else:
if rowCount > int(self.nrow):
rowCount = 1
rowFormat.append(rowCount)
rowCount = rowCount + 1
else:
rowFormat.append(rowCount)
rowCount = rowCount + 1
if colCount > int(self.ncol):
colCount = 1
colFormat.append(colCount)
colCount = colCount + 1
else:
colFormat.append(colCount)
colCount = colCount + 1
if dataFormatCatch == 0:
for tmp in range(self.PlotlyPlotCalls):
if self.PLIs3D:
if str(self.PLType) == "plot3d":
fig.append_trace(dict(type=self.Plotlydata[tmp].type,x=self.Plotlydata[tmp].x, y=self.Plotlydata[tmp].y, z=self.Plotlydata[tmp].z,name=self.Plotlydata[tmp].name,mode=self.Plotlydata[tmp].mode), rowFormat[tmp], colFormat[tmp])
elif str(self.PLType) == "mesh3D":
fig.append_trace(dict(type=self.Plotlydata[tmp].type,x=self.Plotlydata[tmp].x, y=self.Plotlydata[tmp].y, z=self.Plotlydata[tmp].z,name=self.Plotlydata[tmp].name,color=self.Plotlydata[tmp].color), rowFormat[tmp], colFormat[tmp])
else:
if str(self.PLType) == "meshContour":
fig.append_trace(dict(type=self.Plotlydata[tmp].type,x=self.Plotlydata[tmp].x, y=self.Plotlydata[tmp].y, z=self.Plotlydata[tmp].z,name=self.Plotlydata[tmp].name,PLcolorscale=self.Plotlydata[tmp].PLcolorscale), rowFormat[tmp], colFormat[tmp])
else:
fig.append_trace(self.Plotlydata[tmp], rowFormat[tmp], colFormat[tmp])
else:
rCntrl = 1
rIndex = 1
cIndex = 1
cCntrl = 1
rStpVal = int(len(self.Plotlydata)/len(rowFormat))
cStpVal = int(len(self.Plotlydata)/len(colFormat))
for i in range(len(self.Plotlydata)):
if rCntrl > rStpVal:
rCntrl = 1
rIndex = rIndex+1
if cCntrl > cStpVal:
cCntrl = 1
cIndex = cIndex+1
if self.PLIs3D:
if str(self.PLType) == "plot3d":
fig.append_trace(dict(type=self.Plotlydata[i].type,x=self.Plotlydata[i].x, y=self.Plotlydata[i].y, z=self.Plotlydata[i].z,name=self.Plotlydata[i].name,mode=self.Plotlydata[i].mode), rowFormat[rIndex-1], colFormat[cIndex-1])
elif str(self.PLType) == "mesh3D":
fig.append_trace(dict(type=self.Plotlydata[i].type,x=self.Plotlydata[i].x, y=self.Plotlydata[i].y, z=self.Plotlydata[i].z,name=self.Plotlydata[i].name,color=self.Plotlydata[i].color), rowFormat[rIndex-1], colFormat[cIndex-1])
else:
if str(self.PLType) == "meshContour":
fig.append_trace(dict(type=self.Plotlydata[i].type,x=self.Plotlydata[i].x, y=self.Plotlydata[i].y, z=self.Plotlydata[i].z,name=self.Plotlydata[i].name,PLcolorscale=self.Plotlydata[i].PLcolorscale), rowFormat[rIndex-1], colFormat[cIndex-1])
else:
if(len(self.Plotlydata) == len(rowFormat)):
fig.append_trace(self.Plotlydata[i], rowFormat[i], colFormat[i])
else:
fig.append_trace(self.Plotlydata[i], rowFormat[self.PlotlySubPlotNumbers[i]-1], colFormat[self.PlotlySubPlotNumbers[i]-1])
cCntrl = cCntrl + 1
else:
if self.PLIs3D:
if str(self.PLType) == "plot3d":
fig.append_trace(dict(type=self.Plotlydata[i].type,x=self.Plotlydata[i].x, y=self.Plotlydata[i].y, z=self.Plotlydata[i].z,name=self.Plotlydata[i].name,mode=self.Plotlydata[i].mode), rowFormat[rIndex-1], colFormat[cIndex-1])
elif str(self.PLType) == "mesh3D":
fig.append_trace(dict(type=self.Plotlydata[i].type,x=self.Plotlydata[i].x, y=self.Plotlydata[i].y, z=self.Plotlydata[i].z,name=self.Plotlydata[i].name,color=self.Plotlydata[i].color), rowFormat[rIndex-1], colFormat[cIndex-1])
else:
if str(self.PLType) == "meshContour":
fig.append_trace(dict(type=self.Plotlydata[i].type,x=self.Plotlydata[i].x, y=self.Plotlydata[i].y, z=self.Plotlydata[i].z,name=self.Plotlydata[i].name,PLcolorscale=self.Plotlydata[i].PLcolorscale), rowFormat[rIndex-1], colFormat[cIndex-1])
else:
fig.append_trace(self.Plotlydata[i], rowFormat[rIndex-1], colFormat[cIndex-1])
rCntrl = rCntrl + 1
elif cCntrl > cStpVal:
cCntrl = 1
cIndex = cIndex+1
if rCntrl > rStpVal:
rCntrl = 1
rIndex = rIndex+1
if self.PLIs3D:
if str(self.PLType) == "plot3d":
fig.append_trace(dict(type=self.Plotlydata[i].type,x=self.Plotlydata[i].x, y=self.Plotlydata[i].y, z=self.Plotlydata[i].z,name=self.Plotlydata[i].name,mode=self.Plotlydata[i].mode), rowFormat[rIndex-1], colFormat[cIndex-1])
elif str(self.PLType) == "mesh3D":
fig.append_trace(dict(type=self.Plotlydata[i].type,x=self.Plotlydata[i].x, y=self.Plotlydata[i].y, z=self.Plotlydata[i].z,name=self.Plotlydata[i].name,color=self.Plotlydata[i].color), rowFormat[rIndex-1], colFormat[cIndex-1])
else:
if str(self.PLType) == "meshContour":
fig.append_trace(dict(type=self.Plotlydata[i].type,x=self.Plotlydata[i].x, y=self.Plotlydata[i].y, z=self.Plotlydata[i].z,name=self.Plotlydata[i].name,PLcolorscale=self.Plotlydata[i].PLcolorscale), rowFormat[rIndex-1], colFormat[cIndex-1])
else:
fig.append_trace(self.Plotlydata[i], rowFormat[rIndex-1], colFormat[cIndex-1])
rCntrl = rCntrl + 1
else:
if self.PLIs3D:
if str(self.PLType) == "plot3d":
fig.append_trace(dict(type=self.Plotlydata[i].type,x=self.Plotlydata[i].x, y=self.Plotlydata[i].y, z=self.Plotlydata[i].z,name=self.Plotlydata[i].name,mode=self.Plotlydata[i].mode), rowFormat[rIndex-1], colFormat[cIndex-1])
elif str(self.PLType) == "mesh3D":
fig.append_trace(dict(type=self.Plotlydata[i].type,x=self.Plotlydata[i].x, y=self.Plotlydata[i].y, z=self.Plotlydata[i].z,name=self.Plotlydata[i].name,color=self.Plotlydata[i].color), rowFormat[rIndex-1], colFormat[cIndex-1])
else:
if str(self.PLType) == "meshContour":
fig.append_trace(dict(type=self.Plotlydata[i].type,x=self.Plotlydata[i].x, y=self.Plotlydata[i].y, z=self.Plotlydata[i].z,name=self.Plotlydata[i].name,PLcolorscale=self.Plotlydata[i].PLcolorscale), rowFormat[rIndex-1], colFormat[cIndex-1])
else:
fig.append_trace(self.Plotlydata[i], rowFormat[rIndex-1], colFormat[cIndex-1])
cCntrl = cCntrl + 1
else:
if self.PLIs3D:
if str(self.PLType) == "plot3d":
fig.append_trace(dict(type=self.Plotlydata[i].type,x=self.Plotlydata[i].x, y=self.Plotlydata[i].y, z=self.Plotlydata[i].z,name=self.Plotlydata[i].name,mode=self.Plotlydata[i].mode), rowFormat[rIndex-1], colFormat[cIndex-1])
elif str(self.PLType) == "mesh3D":
fig.append_trace(dict(type=self.Plotlydata[i].type,x=self.Plotlydata[i].x, y=self.Plotlydata[i].y, z=self.Plotlydata[i].z,name=self.Plotlydata[i].name,color=self.Plotlydata[i].color), rowFormat[rIndex-1], colFormat[cIndex-1])
else:
if str(self.PLType) == "meshContour":
fig.append_trace(dict(type=self.Plotlydata[i].type,x=self.Plotlydata[i].x, y=self.Plotlydata[i].y, z=self.Plotlydata[i].z,name=self.Plotlydata[i].name,PLcolorscale=self.Plotlydata[i].PLcolorscale), rowFormat[rIndex-1], colFormat[cIndex-1])
else:
fig.append_trace(self.Plotlydata[i], rowFormat[rIndex-1], colFormat[cIndex-1])
rCntrl = rCntrl + 1
cCntrl = cCntrl + 1
fig['layout'].update(title=str(self.figuretitle))
for j in range(self.PlotlyPlotCalls):
if j < len(self.PlotlyXaxisTitles):
fig['layout']['xaxis'+str(j+1)].update(title=self.PlotlyXaxisTitles[j],type=self.Plotlylayout[j].xaxis.type)
else:
fig['layout']['xaxis'+str(j+1)].update(type=self.Plotlylayout[j].xaxis.type)
if j < len(self.PlotlyYaxisTitles):
fig['layout']['yaxis'+str(j+1)].update(title=self.PlotlyYaxisTitles[j],type=self.Plotlylayout[j].yaxis.type)
else:
fig['layout']['yaxis'+str(j+1)].update(type=self.Plotlylayout[j].yaxis.type)
if filename:
offline.plot(fig,filename=filename)
elif image:
offline.plot(fig,image_filename=image_filename,image=image,auto_open=auto_open)
else:
offline.plot(fig)
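# Commented sketch of the intended Plotly workflow. Assumptions: the constructor
# exposes a flag that sets self.useplotly (the flag name below is assumed), and the
# offline Plotly backend imported at the top of this module is available. The
# accumulated traces are only rendered when plotlyPlot() is called.
#
#   p = Plotter(1, 1, 1, 'plotly demo', useplotly=True)   # hypothetical call
#   p.semilogY(1, x, y, ptitle='decay', label=['signal'])
#   p.plotlyPlot(filename='decay.html', auto_open=False)  # writes an offline HTML file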
############################################################
##
def emptyPlot(self,plotnum,projection='rectilinear'):
"""Returns a handler to an empty plot.
This function does not do any plotting, the use must add plots using
the standard MatPlotLib means.
Args:
| plotnum (int): subplot number, 1-based index
| rectilinear (str): type of axes projection, from
['aitoff', 'hammer', 'lambert', 'mollweide', 'polar', 'rectilinear.].
Returns:
| the axis object for the plot
Raises:
| No exception is raised.
"""
pkey = (self.nrow, self.ncol, plotnum)
if pkey not in list(self.subplots.keys()):
self.subplots[pkey] = \
self.fig.add_subplot(self.nrow,self.ncol, plotnum,projection=projection)
ax = self.subplots[pkey]
return ax
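# Minimal, commented usage sketch for emptyPlot above: obtain a bare axis and
# draw on it with standard Matplotlib calls (assumes `p` as in earlier sketches).
#
#   import numpy as np
#   theta = np.linspace(0, 2 * np.pi, 360)
#   ax = p.emptyPlot(1, projection='polar')
#   ax.plot(theta, 1 + 0.5 * np.cos(3 * theta))   # any Matplotlib axis method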
############################################################
##
def meshContour(self, plotnum, xvals, yvals, zvals, levels=10,
ptitle=None, xlabel=None, ylabel=None, shading='flat',
plotCol=[], pltaxis=None, maxNX=0, maxNY=0,
xScientific=False, yScientific=False,
powerLimits=[-4, 2, -4, 2], titlefsize=12,
xylabelfsize=12, xytickfsize=10,
meshCmap=cm.rainbow, cbarshow=False, cbarorientation='vertical',
cbarcustomticks=[], cbarfontsize=12,
drawGrid=False, yInvert=False, xInvert=False,
contourFill=True, contourLine=True, logScale=False,
negativeSolid=False, zeroContourLine=None,
contLabel=False, contFmt='%.2f', contCol='k', contFonSz=8, contLinWid=0.5,
zorders=None, PLcolorscale='' ):
"""XY colour mesh countour plot for (xvals, yvals, zvals) input sets.
The data values must be given on a fixed mesh grid of three-dimensional
$(x,y,z)$ array input sets. The mesh grid is defined in $(x,y)$, while the height
of the mesh is the $z$ value.
Given an existing figure, this function plots in a specified subplot position.
Only one contour plot is drawn at a time. Future contours in the same subplot
will cover any previous contours.
The data set must have three two dimensional arrays, each for x, y, and z.
The data in x, y, and z arrays must have matching data points. The x and y arrays
each define the grid in terms of x and y values, i.e., the x array contains the
x values for the data set, while the y array contains the y values. The z array
contains the z values for the corresponding x and y values in the contour mesh.
Z-values can be plotted on a log scale, in which case the colourbar is adjusted
to show true values, but on the nonlinear scale.
The current version only saves png files, since there appears to be a problem
saving eps files.
The xvals and yvals vectors may have non-constant grid-intervals, i.e., they do not
have to be on regular intervals.
Args:
| plotnum (int): subplot number, 1-based index
| xvals (np.array[N,M]): array of x values
| yvals (np.array[N,M]): array of y values
| zvals (np.array[N,M]): values on a (x,y) grid
| levels (int or [float]): number of contour levels or a list of levels (optional)
| ptitle (string): plot title (optional)
| xlabel (string): x axis label (optional)
| ylabel (string): y axis label (optional)
| shading (string): not used currently (optional)
| plotCol ([strings]): plot colour and line style, list with M entries, use default if [] (optional)
| pltaxis ([xmin, xmax, ymin,ymax]): scale for x,y axes. Let Matplotlib decide if None. (optional)
| maxNX (int): draw maxNX+1 tick labels on x axis (optional)
| maxNY (int): draw maxNY+1 tick labels on y axis (optional)
| xScientific (bool): use scientific notation on x axis (optional)
| yScientific (bool): use scientific notation on y axis (optional)
| powerLimits[float]: scientific tick label power limits [x-low, x-high, y-low, y-high] (optional)
| titlefsize (int): title font size, default 12pt (optional)
| xylabelfsize (int): x-axis, y-axis label font size, default 12pt (optional)
| xytickfsize (int): x-axis, y-axis tick font size, default 10pt (optional)
| meshCmap (cm): colour map for the mesh (optional)
| cbarshow (bool): if true, show a colour bar (optional)
| cbarorientation (string): 'vertical' (right) or 'horizontal' (below) (optional)
| cbarcustomticks zip([z values/float],[tick labels/string]): define custom colourbar tick locations for given z values (optional)
| cbarfontsize (int): font size for colour bar (optional)
| drawGrid (bool): draw the grid on the plot (optional)
| yInvert (bool): invert the y-axis. Flip the y-axis up-down (optional)
| xInvert (bool): invert the x-axis. Flip the x-axis left-right (optional)
| contourFill (bool): fill contours with colour (optional)
| contourLine (bool): draw a series of contour lines (optional)
| logScale (bool): do Z values on log scale, recompute colourbar values (optional)
| negativeSolid (bool): draw negative contours in solid lines, dashed otherwise (optional)
| zeroContourLine (double): draw a single contour at given value (optional)
| contLabel (bool): label the contours with values (optional)
| contFmt (string): contour label c-printf format (optional)
| contCol (string): contour label colour, e.g., 'k' (optional)
| contFonSz (float): contour label fontsize (optional)
| contLinWid (float): contour line width in points (optional)
| zorders ([int]) list of zorders for drawing sequence, highest is last (optional)
| PLcolorscale (string): Plotly colour scale specification (optional)
Returns:
| the axis object for the plot
Raises:
| No exception is raised.
"""
#to rank 2
xx=xvals.reshape(-1, 1)
yy=yvals.reshape(-1, 1)
#if this is a log scale plot
if logScale is True:
zvals = np.log10(zvals)
contour_negative_linestyle = plt.rcParams['contour.negative_linestyle']
if contourLine:
if negativeSolid:
plt.rcParams['contour.negative_linestyle'] = 'solid'
else:
plt.rcParams['contour.negative_linestyle'] = 'dashed'
#create subplot if not existing
if (self.nrow,self.ncol, plotnum) not in list(self.subplots.keys()):
self.subplots[(self.nrow,self.ncol, plotnum)] = \
self.fig.add_subplot(self.nrow,self.ncol, plotnum)
#get axis
ax = self.subplots[(self.nrow,self.ncol, plotnum)]
if drawGrid:
ax.grid(True)
else:
ax.grid(False)
if xlabel is not None:
ax.set_xlabel(xlabel, fontsize=xylabelfsize)
if xScientific:
formx = plt.ScalarFormatter()
formx.set_scientific(True)
formx.set_powerlimits([powerLimits[0], powerLimits[1]])
ax.xaxis.set_major_formatter(formx)
if ylabel is not None:
ax.set_ylabel(ylabel, fontsize=xylabelfsize)
if yScientific:
formy = plt.ScalarFormatter()
formy.set_powerlimits([powerLimits[2], powerLimits[3]])
formy.set_scientific(True)
ax.yaxis.set_major_formatter(formy)
if maxNX >0:
ax.xaxis.set_major_locator(mpl.ticker.MaxNLocator(maxNX))
if maxNY >0:
ax.yaxis.set_major_locator(mpl.ticker.MaxNLocator(maxNY))
if plotCol:
col = plotCol[0]
else:
col = self.nextPlotCol()
if zorders is not None:
# only one contour set is drawn here, so use the first zorder entry
zorder = zorders[0]
else:
zorder = 2
if self.useplotly:
self.PlotlyPlotCalls = self.PlotlyPlotCalls + 1
self.PLType = "meshContour"
if cbarshow:
self.Plotlydata.append(Contour(x=list(itertools.chain.from_iterable(xvals)),
y=list(itertools.chain.from_iterable(yvals)),
z=list(itertools.chain.from_iterable(zvals)),
PLcolorscale=PLcolorscale))
#,color=color,colorbar = ColorBar(PLtickmode=PLtickmode,nticks=PLnticks,
# PLtick0=PLtick0,PLdtick=PLdtick,PLtickvals=PLtickvals,PLticktext=PLticktext),
# PLcolorscale = PLcolorScale,intensity = PLintensity))
else:
self.Plotlydata.append(Contour(x=list(itertools.chain.from_iterable(xvals)),
y=list(itertools.chain.from_iterable(yvals)),
z=list(itertools.chain.from_iterable(zvals)),PLcolorscale=PLcolorscale))
#,color=color))
# Append axis and plot titles
if self.ncol > 1:
self.PlotlySubPlotNumbers.append(plotnum)
elif self.nrow > 1 :
self.PlotlySubPlotNumbers.append(plotnum)
#do the plot
if contourFill:
pmplotcf = ax.contourf(xvals, yvals, zvals, levels,
cmap=meshCmap, zorder=zorder)
if contourLine:
pmplot = ax.contour(xvals, yvals, zvals, levels, cmap=None, linewidths=contLinWid,
colors=col, zorder=zorder)
if zeroContourLine:
pmplot = ax.contour(xvals, yvals, zvals, (zeroContourLine,), cmap=None, linewidths=contLinWid,
colors=col, zorder=zorder)
if contLabel: # and not contourFill:
plt.clabel(pmplot, fmt = contFmt, colors = contCol, fontsize=contFonSz) #, zorder=zorder)
if cbarshow and (contourFill):
#http://matplotlib.org/mpl_toolkits/axes_grid/users/overview.html#colorbar-whose-height-or-width-in-sync-with-the-master-axes
divider = make_axes_locatable(ax)
if cbarorientation == 'vertical':
cax = divider.append_axes("right", size="5%", pad=0.05)
else:
cax = divider.append_axes("bottom", size="5%", pad=0.1)
if not cbarcustomticks:
# cbar = self.fig.colorbar(pmplotcf,orientation=cbarorientation)
cbar = self.fig.colorbar(pmplotcf,cax=cax)
if logScale:
cbartickvals = cbar.ax.yaxis.get_ticklabels()
tickVals = []
# need this roundabout trick to handle minus sign in unicode
for item in cbartickvals:
valstr = float(item.get_text().replace(u'\N{MINUS SIGN}', '-').replace('$',''))
# valstr = item.get_text().replace('\u2212', '-').replace('$','')
val = 10**float(valstr)
if abs(val) < 1000:
valfmt = '{0:f}'.format(val)
else:
valfmt = '{0:e}'.format(val)
tickVals.append(valfmt)
cbartickvals = cbar.ax.yaxis.set_ticklabels(tickVals)
else:
ticks, ticklabels = list(zip(*cbarcustomticks))
# cbar = self.fig.colorbar(pmplotcf,ticks=ticks, orientation=cbarorientation)
cbar = self.fig.colorbar(pmplotcf,ticks=ticks, cax=cax)
if cbarorientation == 'vertical':
cbar.ax.set_yticklabels(ticklabels)
else:
cbar.ax.set_xticklabels(ticklabels)
if cbarorientation == 'vertical':
for t in cbar.ax.get_yticklabels():
t.set_fontsize(cbarfontsize)
else:
for t in cbar.ax.get_xticklabels():
t.set_fontsize(cbarfontsize)
#scale the axes
if pltaxis is not None:
ax.axis(pltaxis)
if(ptitle is not None):
ax.set_title(ptitle, fontsize=titlefsize)
# minor ticks are two points smaller than major
ax.tick_params(axis='both', which='major', labelsize=xytickfsize)
ax.tick_params(axis='both', which='minor', labelsize=xytickfsize-2)
if yInvert:
ax.set_ylim(ax.get_ylim()[::-1])
if xInvert:
ax.set_xlim(ax.get_xlim()[::-1])
plt.rcParams['contour.negative_linestyle'] = contour_negative_linestyle
if self.useplotly:
if self.PLmultipleYAxis:
if yInvert:
self.Plotlylayout.append(Layout(title = ptitle,xaxis=dict(title=xlabel),yaxis=dict(title=ylabel,autorange='reversed')))
elif xInvert:
self.Plotlylayout.append(Layout(title = ptitle,xaxis=dict(title=xlabel,autorange='reversed'),yaxis=dict(title=ylabel)))
else:
self.Plotlylayout.append(Layout(title = ptitle,xaxis=dict(title=xlabel),yaxis=dict(title=ylabel)))#,font=dict(title=self.PLmultiAxisTitle,side=self.PLyAxisSide,overlaying=self.PLyAxisOverlaying)))
elif self.PLmultipleXAxis:
if yInvert:
self.Plotlylayout.append(Layout(title = ptitle,xaxis=dict(title=xlabel),yaxis=dict(title=ylabel,autorange='reversed')))
elif xInvert:
self.Plotlylayout.append(Layout(title = ptitle,xaxis=dict(title=xlabel,autorange='reversed'),yaxis=dict(title=ylabel)))
else:
self.Plotlylayout.append(Layout(title = ptitle,xaxis=dict(title=xlabel),yaxis=dict(title=ylabel)))#,yaxis=PLYAxis,xaxis = PLXAxis,xaxis2=dict(title=self.PLmultiAxisTitle,side=self.PLxAxisSide,overlaying=self.PLxAxisOverlaying)))
else:
if yInvert:
self.Plotlylayout.append(Layout(title = ptitle,xaxis=dict(title=xlabel),yaxis=dict(title=ylabel,autorange='reversed')))
elif xInvert:
self.Plotlylayout.append(Layout(title = ptitle,xaxis=dict(title=xlabel,autorange='reversed'),yaxis=dict(title=ylabel)))
else:
self.Plotlylayout.append(Layout(title = ptitle,xaxis=dict(title=xlabel),yaxis=dict(title=ylabel)))#,xaxis = PLXAxis,yaxis=PLYAxis))
if self.ncol > 1:
self.PlotlySubPlotTitles.append(ptitle)
self.PlotlyXaxisTitles.append(xlabel)
self.PlotlyYaxisTitles.append(ylabel)
elif self.nrow > 1:
self.PlotlySubPlotTitles.append(ptitle)
self.PlotlyXaxisTitles.append(xlabel)
self.PlotlyYaxisTitles.append(ylabel)
return ax
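# Minimal, commented usage sketch for meshContour above. Assumptions: `p` is an
# instance of this class; the (x,y) grid is built with numpy meshgrid so that the
# three arrays have matching [N,M] shapes as required by the docstring.
#
#   import numpy as np
#   x = np.linspace(-3, 3, 120)
#   y = np.linspace(-2, 2, 80)
#   X, Y = np.meshgrid(x, y)                      # X, Y, Z all shaped [80,120]
#   Z = np.exp(-(X**2 + Y**2))
#   ax = p.meshContour(1, X, Y, Z, levels=15, ptitle='Gaussian hill',
#                      xlabel='x', ylabel='y', cbarshow=True)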
############################################################
##
def mesh3D(self, plotnum, xvals, yvals, zvals,
ptitle=None, xlabel=None, ylabel=None, zlabel=None,
rstride=1, cstride=1, linewidth=0,
plotCol=None, edgeCol=None, pltaxis=None, maxNX=0, maxNY=0, maxNZ=0,
xScientific=False, yScientific=False, zScientific=False,
powerLimits=[-4, 2, -4, 2, -2, 2], titlefsize=12,
xylabelfsize=12, xytickfsize=10, wireframe=False, surface=True,
cmap=cm.rainbow, cbarshow=False,
cbarorientation = 'vertical', cbarcustomticks=[], cbarfontsize = 12,
drawGrid=True, xInvert=False, yInvert=False, zInvert=False,
logScale=False, alpha=1, alphawire=1,
azim=45, elev=30, distance=10, zorders=None, clip_on=True,
PLcolor=None,
PLcolorScale=None, PLtickmode=None, PLnticks=None, PLtick0=None, PLdtick=None,
PLtickvals=None, PLticktext=None, PLintensity = None
):
"""XY colour mesh plot for (xvals, yvals, zvals) input sets.
Given an existing figure, this function plots in a specified subplot position.
Only one mesh is drawn at a time. Future meshes in the same subplot
will cover any previous meshes.
The mesh grid is defined in (x,y), while the height of the mesh is the z value.
The data set must have three two dimensional arrays, each for x, y, and z.
The data in x, y, and z arrays must have matching data points.
The x and y arrays each define the grid in terms of x and y values, i.e.,
the x array contains the x values for the data set, while the y array
contains the y values. The z array contains the z values for the
corresponding x and y values in the mesh.
Use wireframe=True to obtain a wireframe plot.
Use surface=True to obtain a surface plot with fill colours.
Z-values can be plotted on a log scale, in which case the colourbar is adjusted
to show true values, but on the nonlinear scale.
The xvals and yvals vectors may have non-constant grid-intervals, i.e.,
they do not have to be on regular intervals, but z array must correspond
to the (x,y) grid.
Args:
| plotnum (int): subplot number, 1-based index
| xvals (np.array[N,M]): array of x values, corresponding to (x,y) grid
| yvals (np.array[N,M]): array of y values, corresponding to (x,y) grid
| zvals (np.array[N,M]): array of z values, corresponding to (x,y) grid
| ptitle (string): plot title (optional)
| xlabel (string): x axis label (optional)
| ylabel (string): y axis label (optional)
| zlabel (string): z axis label (optional)
| rstride (int): mesh line row (y axis) stride, every rstride value along y axis (optional)
| cstride (int): mesh line column (x axis) stride, every cstride value along x axis (optional)
| linewidth (float): mesh line width in points (optional)
| plotCol ([strings]): fill colour, list with M=1 entries, use default if None (optional)
| edgeCol ([strings]): mesh line colour , list with M=1 entries, use default if None (optional)
| pltaxis ([xmin, xmax, ymin, ymax]): scale for x,y axes. z scale is not settable. Let Matplotlib decide if None (optional)
| maxNX (int): draw maxNX+1 tick labels on x axis (optional)
| maxNY (int): draw maxNY+1 tick labels on y axis (optional)
| maxNZ (int): draw maxNZ+1 tick labels on z axis (optional)
| xScientific (bool): use scientific notation on x axis (optional)
| yScientific (bool): use scientific notation on y axis (optional)
| zScientific (bool): use scientific notation on z-axis (optional)
| powerLimits[float]: scientific tick label power limits [x-neg, x-pos, y-neg, y-pos, z-neg, z-pos] (optional)
| titlefsize (int): title font size, default 12pt (optional)
| xylabelfsize (int): x-axis, y-axis, z-axis label font size, default 12pt (optional)
| xytickfsize (int): x-axis, y-axis, z-axis tick font size, default 10pt (optional)
| wireframe (bool): If True, do a wireframe plot, (optional)
| surface (bool): If True, do a surface plot, (optional)
| cmap (cm): color map for the mesh (optional)
| cbarshow (bool): if true, show a colour bar (optional)
| cbarorientation (string): 'vertical' (right) or 'horizontal' (below) (optional)
| cbarcustomticks zip([z values/float],[tick labels/string]): define custom colourbar tick locations for given z values (optional)
| cbarfontsize (int): font size for color bar (optional)
| drawGrid (bool): draw the grid on the plot (optional)
| xInvert (bool): invert the x-axis. Flip the x-axis left-right (optional)
| yInvert (bool): invert the y-axis. Flip the y-axis left-right (optional)
| zInvert (bool): invert the z-axis. Flip the z-axis up-down (optional)
| logScale (bool): do Z values on log scale, recompute colourbar vals (optional)
| alpha (float): surface transparency (optional)
| alphawire (float): mesh transparency (optional)
| azim (float): graph view azimuth angle [degrees] (optional)
| elev (float): graph view elevation angle [degrees] (optional)
| distance (float): distance between viewer and plot (optional)
| zorders ([int]) list of zorder for drawing sequence, highest is last (optional)
| clip_on (bool) clips objects to drawing axes (optional)
| PLcolor (string): graph colour, e.g. 'FFFFFF'
| PLcolorScale ([int,string]): colour scale for Plotly mesh graphs, e.g. [0, 'rgb(0, 0, 0)']
| PLtickmode (string): Plotly colourbar tick mode
| PLnticks (int): number of colourbar ticks
| PLtick0 (int): first tick value
| PLdtick (int): tick step between colourbar ticks
| PLtickvals [int]: explicit colourbar tick values
| PLticktext [string]: explicit colourbar tick labels
| PLintensity: vertex intensity values used for Plotly mesh colouring
Returns:
| the axis object for the plot
Raises:
| No exception is raised.
"""
from mpl_toolkits.mplot3d.axes3d import Axes3D
#if this is a log scale plot
if logScale is True:
zvals = np.log10(zvals)
#create subplot if not existing
if (self.nrow,self.ncol, plotnum) not in list(self.subplots.keys()):
self.subplots[(self.nrow,self.ncol, plotnum)] = \
self.fig.add_subplot(self.nrow,self.ncol, plotnum, projection='3d')
#get axis
ax = self.subplots[(self.nrow,self.ncol, plotnum)]
if drawGrid:
ax.grid(True)
else:
ax.grid(False)
if xlabel is not None:
ax.set_xlabel(xlabel, fontsize=xylabelfsize)
if xScientific:
formx = plt.ScalarFormatter()
formx.set_scientific(True)
formx.set_powerlimits([powerLimits[0], powerLimits[1]])
ax.xaxis.set_major_formatter(formx)
if ylabel is not None:
ax.set_ylabel(ylabel, fontsize=xylabelfsize)
if yScientific:
formy = plt.ScalarFormatter()
formy.set_powerlimits([powerLimits[2], powerLimits[3]])
formy.set_scientific(True)
ax.yaxis.set_major_formatter(formy)
if zlabel is not None:
ax.set_zlabel(zlabel, fontsize=xylabelfsize)
if zScientific:
formz = plt.ScalarFormatter()
formz.set_powerlimits([powerLimits[4], powerLimits[5]])
formz.set_scientific(True)
ax.zaxis.set_major_formatter(formz)
if maxNX >0:
ax.xaxis.set_major_locator(mpl.ticker.MaxNLocator(maxNX))
if maxNY >0:
ax.yaxis.set_major_locator(mpl.ticker.MaxNLocator(maxNY))
if maxNZ >0:
ax.zaxis.set_major_locator(mpl.ticker.MaxNLocator(maxNZ))
if plotCol:
col = plotCol[0]
else:
col = self.nextPlotCol()
if edgeCol:
edcol = edgeCol[0]
else:
edcol = self.nextPlotCol()
if zorders:
# only one mesh is drawn here, so use the first zorder entry
zorder = zorders[0]
else:
zorder = 1
if self.useplotly:
self.PlotlyPlotCalls = self.PlotlyPlotCalls + 1
self.PLIs3D = True
self.PLType = "mesh3D"
if cbarshow:
self.Plotlydata.append(Mesh3d(x=list(itertools.chain.from_iterable(xvals)),
y=list(itertools.chain.from_iterable(yvals)),
z=list(itertools.chain.from_iterable(zvals)),color=PLcolor,
colorbar = ColorBar(PLtickmode=PLtickmode,nticks=PLnticks,
PLtick0=PLtick0,PLdtick=PLdtick,PLtickvals=PLtickvals,PLticktext=PLticktext),
PLcolorscale=PLcolorScale,intensity=PLintensity))
else:
self.Plotlydata.append(Mesh3d(x=list(itertools.chain.from_iterable(xvals)),
y=list(itertools.chain.from_iterable(yvals)),
z=list(itertools.chain.from_iterable(zvals)),color=PLcolor))
# Append axis and plot titles
if self.ncol > 1:
self.PlotlySubPlotNumbers.append(plotnum)
elif self.nrow > 1 :
self.PlotlySubPlotNumbers.append(plotnum)
#do the plot
if surface:
pmplot = ax.plot_surface(xvals, yvals, zvals, rstride=rstride, cstride=cstride,
edgecolor=edcol, cmap=cmap, linewidth=linewidth, alpha=alpha,
zorder=zorder, clip_on=clip_on)
if wireframe:
pmplot = ax.plot_wireframe(xvals, yvals, zvals, rstride=rstride, cstride=cstride,
color=col, edgecolor=edcol, linewidth=linewidth, alpha=alphawire,
zorder=zorder, clip_on=clip_on)
ax.view_init(azim=azim, elev=elev)
ax.dist = distance
if cbarshow is True and cmap is not None:
#http://matplotlib.org/mpl_toolkits/axes_grid/users/overview.html#colorbar-whose-height-or-width-in-sync-with-the-master-axes
# divider = make_axes_locatable(ax)
# if cbarorientation == 'vertical':
# cax = divider.append_axes("right", size="5%", pad=0.05)
# else:
# cax = divider.append_axes("bottom", size="5%", pad=0.1)
if not cbarcustomticks:
cbar = self.fig.colorbar(pmplot,orientation=cbarorientation)
# cbar = self.fig.colorbar(pmplot,cax=cax)
if logScale:
cbartickvals = cbar.ax.yaxis.get_ticklabels()
tickVals = []
# need this roundabout trick to handle minus sign in unicode
for item in cbartickvals:
valstr = item.get_text().replace('\u2212', '-').replace('$','')
val = 10**float(valstr)
if abs(val) < 1000:
tickstr = '{0:f}'.format(val)
else:
tickstr = '{0:e}'.format(val)
tickVals.append(tickstr)
cbartickvals = cbar.ax.yaxis.set_ticklabels(tickVals)
else:
ticks, ticklabels = list(zip(*cbarcustomticks))
cbar = self.fig.colorbar(pmplot,ticks=ticks, orientation=cbarorientation)
# cbar = self.fig.colorbar(pmplot,ticks=ticks, cax=cax)
if cbarorientation == 'vertical':
cbar.ax.set_yticklabels(ticklabels)
else:
cbar.ax.set_xticklabels(ticklabels)
if cbarorientation == 'vertical':
for t in cbar.ax.get_yticklabels():
t.set_fontsize(cbarfontsize)
else:
for t in cbar.ax.get_xticklabels():
t.set_fontsize(cbarfontsize)
if(ptitle is not None):
plt.title(ptitle, fontsize=titlefsize)
#scale the axes
if pltaxis is not None:
# ax.axis(pltaxis)
ax.set_xlim(pltaxis[0], pltaxis[1])
ax.set_ylim(pltaxis[2], pltaxis[3])
ax.set_zlim(pltaxis[4], pltaxis[5])
if(ptitle is not None):
ax.set_title(ptitle, fontsize=titlefsize)
# minor ticks are two points smaller than major
ax.tick_params(axis='both', which='major', labelsize=xytickfsize)
ax.tick_params(axis='both', which='minor', labelsize=xytickfsize-2)
if xInvert:
ax.set_xlim(ax.get_xlim()[::-1])
if yInvert:
ax.set_ylim(ax.get_ylim()[::-1])
if zInvert:
ax.set_zlim(ax.get_zlim()[::-1])
if self.useplotly:
if self.PLmultipleYAxis:
self.Plotlylayout.append(Layout(title = ptitle))
elif self.PLmultipleXAxis:
self.Plotlylayout.append(Layout(title = ptitle))
else:
self.Plotlylayout.append(Layout(title = ptitle))
if self.ncol > 1:
self.PlotlySubPlotTitles.append(ptitle)
self.PlotlyXaxisTitles.append(xlabel)
self.PlotlyYaxisTitles.append(ylabel)
elif self.nrow > 1:
self.PlotlySubPlotTitles.append(ptitle)
self.PlotlyXaxisTitles.append(xlabel)
self.PlotlyYaxisTitles.append(ylabel)
return ax
############################################################
##
def polar(self, plotnum, theta, r, ptitle=None, \
plotCol=None, label=[],labelLocation=[-0.1, 0.1], \
highlightNegative=True, highlightCol='#ffff00', highlightWidth=4,\
legendAlpha=0.0, linestyle=None,\
rscale=None, rgrid=[0,5], thetagrid=[30], \
direction='counterclockwise', zerooffset=0, titlefsize=12, drawGrid=True,
zorders=None, clip_on=True, markers=[], markevery=None,
):
"""Create a subplot and plot the data in polar coordinates (linear radial orginates only).
Given an existing figure, this function plots in a specified subplot position.
The function arguments are described below in some detail. Note that the radial values
or ordinates can be more than one column, each column representing a different
line in the plot. This is convenient if large arrays of data must be plotted. If more
than one column is present, the label argument can contain the legend labels for
each of the columns/lines. The scale for the radial ordinates can be set with rscale.
The number of radial grid circles can be set with rgrid - this provides a somewhat
better control over the built-in radial grid in matplotlib. thetagrids defines the angular
grid interval. The angular rotation direction can be set to be clockwise or
counterclockwise. Likewise, the rotation offset where the plot zero angle must be,
is set with `zerooffset`.
For some obscure reason Matplotlib version 1.13 does not plot negative values on the
polar plot. We therefore force the plot by making the values positive and then highlighting them as negative.
Args:
| plotnum (int): subplot number, 1-based index
| theta (np.array[N,] or [N,1]): angular abscissa in radians
| r (np.array[N,] or [N,M]): radial ordinates - could be M columns
| ptitle (string): plot title (optional)
| plotCol ([strings]): plot colour and line style, list with M entries, use default if None (optional)
| label ([strings]): legend label, list with M entries (optional)
| labelLocation ([x,y]): where the legend should located (optional)
| highlightNegative (bool): indicate if negative data must be highlighted (optional)
| highlightCol (string): negative highlight colour string (optional)
| highlightWidth (int): negative highlight line width(optional)
| legendAlpha (float): transparency for legend box (optional)
| linestyle ([str]): line style to be used in plot
| rscale ([rmin, rmax]): radial plotting limits. use default setting if None.
If rmin is negative the zero is a circle and rmin is at the centre of the graph (optional)
| rgrid ([rinc, numinc]): radial grid, use default is [0,5].
If rgrid is None don't show. If rinc=0 then numinc is number of intervals.
If rinc is not zero then rinc is the increment and numinc is ignored (optional)
| thetagrid ([float]): theta grid interval [degrees] as a one-element list, if None don't show (optional)
| direction (string): direction in increasing angle, 'counterclockwise' or 'clockwise' (optional)
| zerooffset (float): rotation offset where zero should be [rad]. Positive
zero-offset rotation is counterclockwise from 3'o'clock (optional)
| titlefsize (int): title font size, default 12pt (optional)
| drawGrid (bool): draw a grid on the graph (optional)
| zorder ([int]) list of zorder for drawing sequence, highest is last (optional)
| clip_on (bool) clips objects to drawing axes (optional)
| markers ([string]) markers to be used for plotting data points (optional)
| markevery (int | (startind, stride)) subsample when using markers (optional)
Returns:
| the axis object for the plot
Raises:
| No exception is raised.
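Example:
    A minimal usage sketch (assumptions: a Plotter instance ``p`` with a single
    subplot, e.g. ``p = Plotter(1,1,1)``, and numpy imported as ``np``; values
    are illustrative only)::

        theta = np.linspace(0, 2*np.pi, 200)
        r = np.cos(3*theta)
        p.polar(1, theta, r, ptitle='three-lobe pattern',
                thetagrid=[45], rgrid=[0,5], highlightNegative=True)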
"""
if theta.ndim>1:
tt=theta
else:
if type(theta)==type(pd.Series()):
theta = theta.values
tt=theta.reshape(-1, 1)
if r.ndim>1:
rr=r
else:
if type(r)==type(pd.Series()):
r = r.values
rr=r.reshape(-1, 1)
MakeAbs = True
if rscale is not None:
if rscale[0] < 0:
MakeAbs = False
else:
highlightNegative=True #override the function value
else:
highlightNegative=True #override the function value
#plotCol = self.buildPlotCol(plotCol, rr.shape[1])
ax = None
pkey = (self.nrow, self.ncol, plotnum)
if pkey not in list(self.subplots.keys()):
self.subplots[pkey] = \
self.fig.add_subplot(self.nrow,self.ncol, plotnum, polar=True)
ax = self.subplots[pkey]
ax.grid(drawGrid)
rmax=0
for i in range(rr.shape[1]):
# negative val :forcing positive and phase shifting
# if forceAbsolute:
# ttt = tt + np.pi*(rr[:, i] < 0).reshape(-1, 1)
# rrr = np.abs(rr[:, i])
# else:
ttt = tt.reshape(-1,)
rrr = rr[:, i].reshape(-1,)
#print(rrr)
if highlightNegative:
#find zero crossings in data
zero_crossings = np.where(np.diff(np.sign(rr),axis=0))[0] + 1
#split the input into different subarrays according to crossings
negrrr = np.split(rr,zero_crossings)
negttt = np.split(tt,zero_crossings)
# print('zero crossing',zero_crossings)
# print(len(negrrr))
# print(negrrr)
mmrk = ''
if markers:
if i >= len(markers):
mmrk = markers[-1]
else:
mmrk = markers[i]
#set up the line style, either given or next in sequence
if plotCol:
col = plotCol[i]
else:
col = self.nextPlotCol()
if linestyle is None:
linestyleL = '-'
else:
if type(linestyle) == type([1]):
linestyleL = linestyle[i]
else:
linestyleL = linestyle
# print('p',ttt.shape)
# print('p',rrr.shape)
if zorders:
if len(zorders) > 1:
zorder = zorders[i]
else:
zorder = zorders[0]
else:
zorder = 2
if not label:
if highlightNegative:
lines = ax.plot(ttt, rrr, col, clip_on=clip_on, zorder=zorder,marker=mmrk, markevery=markevery,linestyle=linestyleL)
neglinewith = highlightWidth*plt.getp(lines[0],'linewidth')
for ii in range(0,len(negrrr)):
if len(negrrr[ii]) > 0:
if negrrr[ii][0] < 0:
if MakeAbs:
ax.plot(negttt[ii], np.abs(negrrr[ii]), highlightCol,
linewidth=neglinewith, clip_on=clip_on, zorder=zorder,
marker=mmrk, markevery=markevery,linestyle=linestyleL)
else:
ax.plot(negttt[ii], negrrr[ii], highlightCol,
linewidth=neglinewith, clip_on=clip_on, zorder=zorder,
marker=mmrk, markevery=markevery,linestyle=linestyleL)
ax.plot(ttt, rrr, col, clip_on=clip_on, zorder=zorder,marker=mmrk, markevery=markevery,linestyle=linestyleL)
rmax = np.maximum(np.abs(rrr).max(), rmax)
rmin = 0
else:
if highlightNegative:
lines = ax.plot(ttt, rrr, col, clip_on=clip_on, zorder=zorder,marker=mmrk, markevery=markevery,linestyle=linestyleL)
neglinewith = highlightWidth*plt.getp(lines[0],'linewidth')
for ii in range(0,len(negrrr)):
if len(negrrr[ii]) > 0:
# print(len(negrrr[ii]))
# if negrrr[ii][0] < 0:
if negrrr[ii][0][0] < 0:
if MakeAbs:
ax.plot(negttt[ii], np.abs(negrrr[ii]), highlightCol,
linewidth=neglinewith, clip_on=clip_on, zorder=zorder,
marker=mmrk, markevery=markevery,linestyle=linestyleL)
else:
ax.plot(negttt[ii], negrrr[ii], highlightCol,
linewidth=neglinewith, clip_on=clip_on, zorder=zorder,
marker=mmrk, markevery=markevery,linestyle=linestyleL)
ax.plot(ttt, rrr, col,label=label[i], clip_on=clip_on, zorder=zorder,marker=mmrk, markevery=markevery,linestyle=linestyleL)
rmax=np.maximum(np.abs(rrr).max(), rmax)
rmin = 0
if MakeAbs:
ax.plot(ttt, np.abs(rrr), col, clip_on=clip_on, zorder=zorder,marker=mmrk, markevery=markevery,linestyle=linestyleL)
else:
ax.plot(ttt, rrr, col, clip_on=clip_on, zorder=zorder,marker=mmrk, markevery=markevery,linestyle=linestyleL)
#Plotly polar setup
if self.useplotly:
# Assuming that either y or x has to 1
if thetagrid is None:
tt=tt*(180.0/(np.pi))
else:
tt=tt*(180.0/(np.pi*(thetagrid[0]/(-4.62*i+5))))
try:
if len(r[0,:]) > 1:
self.Plotlydata.append(Scatter(r=rr[:,i], t=tt[:,0], name = label,mode='lines'))
elif len(theta[0,:]) > 1:
self.Plotlydata.append(Scatter(r=rr[:,0], t=tt[:,i], name = label,mode='lines'))
else:
self.Plotlydata.append(Scatter(r=rr[:,0], t=tt[:,0], name = label,mode='lines'))
except:
self.Plotlydata.append(Scatter(r=rr[:,0], t=tt[:,0], name = label))
# Append axis and plot titles
if self.ncol > 1:
self.PlotlySubPlotNumbers.append(plotnum)
elif self.nrow > 1 :
self.PlotlySubPlotNumbers.append(plotnum)
if label:
fontP = mpl.font_manager.FontProperties()
fontP.set_size('small')
leg = ax.legend(loc='upper left',
bbox_to_anchor=(labelLocation[0], labelLocation[1]),
prop = fontP, fancybox=True)
leg.get_frame().set_alpha(legendAlpha)
self.bbox_extra_artists.append(leg)
ax.set_theta_direction(direction)
ax.set_theta_offset(zerooffset)
#set up the grids
if thetagrid is None:
ax.set_xticklabels([])
else:
plt.thetagrids(list(range(0, 360, thetagrid[0])))
#Set increment and maximum radial limits
if rscale is None:
rscale = [rmin, rmax]
if rgrid is None:
ax.set_yticklabels([])
else:
if rgrid[0] == 0:
ax.set_yticks(np.linspace(rscale[0],rscale[1],int(rgrid[1])))
if rgrid[0] != 0:
numrgrid = (rscale[1] - rscale[0] ) / rgrid[0]
ax.set_yticks(np.linspace(rscale[0],rscale[1],int(numrgrid+1.000001)))
ax.set_ylim(rscale[0],rscale[1])
if(ptitle is not None):
ax.set_title(ptitle, fontsize=titlefsize, \
verticalalignment ='bottom', horizontalalignment='center')
if self.useplotly:
if self.PLmultipleYAxis:
self.Plotlylayout.append(Layout(showlegend = True,title = ptitle,orientation=+90))
elif self.PLmultipleXAxis:
self.Plotlylayout.append(Layout(showlegend = True,title = ptitle,orientation=+90))
else:
self.Plotlylayout.append(Layout(showlegend = True,title = ptitle,orientation=+90))
if self.ncol > 1:
self.PlotlySubPlotTitles.append(ptitle)
self.PlotlySubPlotLabels.append(label)
elif self.nrow > 1:
self.PlotlySubPlotTitles.append(ptitle)
self.PlotlySubPlotLabels.append(label)
return ax
############################################################
##
def showImage(self, plotnum, img, ptitle=None, xlabel=None, ylabel=None,
cmap=plt.cm.gray, titlefsize=12, cbarshow=False,
cbarorientation = 'vertical', cbarcustomticks=[], cbarfontsize = 12,
labelfsize=10, xylabelfsize = 12,interpolation=None):
"""Creates a subplot and show the image using the colormap provided.
Args:
| plotnum (int): subplot number, 1-based index
| img (np.ndarray): numpy 2d array containing the image
| ptitle (string): plot title (optional)
| xlabel (string): x axis label (optional)
| ylabel (string): y axis label (optional)
| cmap: matplotlib colormap, default gray (optional)
| cbarshow (bool): if true, then show a colour bar (optional)
| cbarorientation (string): 'vertical' (right) or 'horizontal' (below) (optional)
| cbarcustomticks zip([tick locations/float],[tick labels/string]): locations in image grey levels (optional)
| cbarfontsize (int): font size for colour bar (optional)
| titlefsize (int): title font size, default 12pt (optional)
| xylabelfsize (int): x-axis, y-axis label font size, default 12pt (optional)
| interpolation (str): 'none', 'nearest', 'bilinear', 'bicubic', 'spline16', 'spline36', 'hanning', 'hamming', 'hermite', 'kaiser', 'quadric', 'catrom', 'gaussian', 'bessel', 'mitchell', 'sinc', 'lanczos'(optional, see pyplot.imshow)
Returns:
| the axis object for the plot
Raises:
| No exception is raised.
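Example:
    A minimal usage sketch (assumptions: a Plotter instance ``p`` with a single
    subplot and numpy imported as ``np``; values are illustrative only)::

        img = np.random.rand(64, 64)
        p.showImage(1, img, ptitle='random image', cbarshow=True,
                    cbarorientation='vertical', interpolation='nearest')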
"""
#http://matplotlib.sourceforge.net/examples/pylab_examples/colorbar_tick_labelling_demo.html
#http://matplotlib.1069221.n5.nabble.com/Colorbar-Ticks-td21289.html
pkey = (self.nrow, self.ncol, plotnum)
if pkey not in list(self.subplots.keys()):
self.subplots[pkey] = \
self.fig.add_subplot(self.nrow,self.ncol, plotnum)
ax = self.subplots[pkey]
cimage = ax.imshow(img, cmap,interpolation=interpolation)
if xlabel is not None:
ax.set_xlabel(xlabel, fontsize=xylabelfsize)
if ylabel is not None:
ax.set_ylabel(ylabel, fontsize=xylabelfsize)
ax.axis('off')
if cbarshow is True:
#http://matplotlib.org/mpl_toolkits/axes_grid/users/overview.html#colorbar-whose-height-or-width-in-sync-with-the-master-axes
divider = make_axes_locatable(ax)
if cbarorientation == 'vertical':
cax = divider.append_axes("right", size="5%", pad=0.05)
# else:
# cay = divider.append_axes("bottom", size="5%", pad=0.1)
if not cbarcustomticks:
if cbarorientation == 'vertical':
cbar = self.fig.colorbar(cimage,cax=cax)
else:
cbar = self.fig.colorbar(cimage,orientation=cbarorientation)
else:
ticks, ticklabels = list(zip(*cbarcustomticks))
if cbarorientation == 'vertical':
cbar = self.fig.colorbar(cimage,ticks=ticks, cax=cax)
else:
cbar = self.fig.colorbar(cimage,ticks=ticks, orientation=cbarorientation)
if cbarorientation == 'vertical':
cbar.ax.set_yticklabels(ticklabels)
else:
cbar.ax.set_xticklabels(ticklabels)
if cbarorientation == 'vertical':
for t in cbar.ax.get_yticklabels():
t.set_fontsize(cbarfontsize)
else:
for t in cbar.ax.get_xticklabels():
t.set_fontsize(cbarfontsize)
if(ptitle is not None):
ax.set_title(ptitle, fontsize=titlefsize)
return ax
############################################################
##
def plot3d(self, plotnum, x, y, z, ptitle=None, xlabel=None, ylabel=None, zlabel=None,
plotCol=[], linewidths=None, pltaxis=None, label=None, legendAlpha=0.0, titlefsize=12,
xylabelfsize = 12, xInvert=False, yInvert=False, zInvert=False,scatter=False,
markers=None, markevery=None, azim=45, elev=30, zorders=None, clip_on=True, edgeCol=None,
linestyle='-'):
"""3D plot on linear scales for x y z input sets.
Given an existing figure, this function plots in a specified subplot position.
The function arguments are described below in some detail.
Note that multiple 3D data sets can be plotted simultaneously by adding additional
columns to the input coordinates of the (x,y,z) arrays, each set of columns representing
a different line in the plot. This is convenient if large arrays of data must
be plotted. If more than one column is present, the label argument can contain the
legend labels for each of the columns/lines.
Args:
| plotnum (int): subplot number, 1-based index
| x (np.array[N,] or [N,M]) x coordinates of each line.
| y (np.array[N,] or [N,M]) y coordinates of each line.
| z (np.array[N,] or [N,M]) z coordinates of each line.
| ptitle (string): plot title (optional)
| xlabel (string): x-axis label (optional)
| ylabel (string): y-axis label (optional)
| zlabel (string): z axis label (optional)
| plotCol ([strings]): plot colour and line style, list with M entries, use default if None (optional)
| linewidths ([float]): plot line width in points, list with M entries, use default if None (optional)
| pltaxis ([xmin, xmax, ymin, ymax, zmin, zmax]) scale for x,y,z axes. Let Matplotlib decide if None. (optional)
| label ([strings]): legend label for ordinate, list with M entries (optional)
| legendAlpha (float): transparency for legend box (optional)
| titlefsize (int): title font size, default 12pt (optional)
| xylabelfsize (int): x, y, z label font size, default 12pt (optional)
| xInvert (bool): invert the x-axis (optional)
| yInvert (bool): invert the y-axis (optional)
| zInvert (bool): invert the z-axis (optional)
| scatter (bool): draw only the points, no lines (optional)
| markers ([string]): markers to be used for plotting data points (optional)
| markevery (int | (startind, stride)): subsample when using markers (optional)
| azim (float): graph view azimuth angle [degrees] (optional)
| elev (float): graph view elevation angle [degrees] (optional)
| zorder ([int]): list of zorder for drawing sequence, highest is last (optional)
| clip_on (bool): clips objects to drawing axes (optional)
| edgeCol ([int]): list of colour specs, value at [0] used for edge colour (optional).
| linestyle (string): linestyle for this plot (optional)
Returns:
| the axis object for the plot
Raises:
| No exception is raised.
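Example:
    A minimal usage sketch (assumptions: a Plotter instance ``p`` with a single
    subplot and numpy imported as ``np``; values are illustrative only)::

        t = np.linspace(0, 4*np.pi, 200)
        p.plot3d(1, np.cos(t), np.sin(t), t/(4*np.pi), ptitle='helix',
                 xlabel='x', ylabel='y', zlabel='z')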
"""
# if required convert 1D arrays into 2D arrays
if type(x)==type(pd.Series()):
x = x.values
if type(y)==type(pd.Series()):
y = y.values
if type(z)==type(pd.Series()):
z = z.values
if x.ndim < 2:
x = x.reshape(-1,1)
if y.ndim < 2:
y = y.reshape(-1,1)
if z.ndim < 2:
z = z.reshape(-1,1)
# if not plotCol:
# plotCol = self.nextPlotCol()
# else:
# plotCol = self.buildPlotCol(plotCol, x.shape[-1])
if linestyle is None:
linestyleL = '-'
else:
if type(linestyle) == type([1]):
# resolved once before the plotting loop, so use the first list entry
linestyleL = linestyle[0]
else:
linestyleL = linestyle
if (self.nrow,self.ncol, plotnum) not in list(self.subplots.keys()):
self.subplots[(self.nrow,self.ncol, plotnum)] = \
self.fig.add_subplot(self.nrow,self.ncol, plotnum, projection='3d')
ax = self.subplots[(self.nrow,self.ncol, plotnum)]
if self.useplotly:
self.PlotlyPlotCalls = self.PlotlyPlotCalls + 1
# print(x.shape[-1])
for i in range(x.shape[-1]):
if plotCol:
if i >= len(plotCol):
col = plotCol[-1]
else:
col = plotCol[i]
else:
col = self.nextPlotCol()
if markers:
marker = markers[i]
else:
marker = None
if zorders:
if len(zorders) > 1:
zorder = zorders[i]
else:
zorder = zorders[0]
else:
zorder = 2
if linewidths is not None:
if scatter:
ax.scatter(x[:,i], y[:,i], z[:,i], c=col, linewidth=linewidths[i],
marker=marker, zorder=zorder, clip_on=clip_on)
else:
ax.plot(x[:,i], y[:,i], z[:,i], c=col, linewidth=linewidths[i],
marker=marker,markevery=markevery, zorder=zorder, clip_on=clip_on,linestyle=linestyleL)
else:
if scatter:
ax.scatter(x[:,i], y[:,i], z[:,i], c=col, marker=marker,
zorder=zorder, clip_on=clip_on)
else:
ax.plot(x[:,i], y[:,i], z[:,i], c=col, marker=marker,
markevery=markevery, zorder=zorder, clip_on=clip_on,linestyle=linestyleL)
# Plotly 3D plot setup
if self.useplotly:
self.PLIs3D = True
self.PLType = "plot3d"
try:
if (len(x[0,:]) > 1) and (len(y[0,:]) > 1) and (len(z[0,:]) > 1):
if len(label) > x.shape[-1]:
self.Plotlydata.append(Scatter3d(x=x[:,i],y=y[:,i],z=z[:,i], name = label,mode='lines'))
else:
self.Plotlydata.append(Scatter3d(x=x[:,i],y=y[:,i],z=z[:,i], name = label[i],mode='lines'))
elif (len(x[0,:]) > 1) and (len(y[0,:]) <= 1) and (len(z[0,:]) <= 1):
if len(label) > x.shape[-1]:
self.Plotlydata.append(Scatter3d(x=x[:,i],y=y[:,0],z=z[:,0], name = label,mode='lines'))
else:
self.Plotlydata.append(Scatter3d(x=x[:,i],y=y[:,0],z=z[:,0], name = label[i],mode='lines'))
elif (len(x[0,:]) <= 1) and (len(y[0,:]) > 1) and (len(z[0,:]) <= 1):
if len(label) > x.shape[-1]:
self.Plotlydata.append(Scatter3d(x=x[:,0],y=y[:,i],z=z[:,0], name = label,mode='lines'))
else:
self.Plotlydata.append(Scatter3d(x=x[:,0],y=y[:,i],z=z[:,0], name = label[i],mode='lines'))
elif (len(x[0,:]) <= 1) and (len(y[0,:]) <= 1) and (len(z[0,:]) > 1):
if len(label) > x.shape[-1]:
self.Plotlydata.append(Scatter3d(x=x[:,0],y=y[:,0],z=z[:,i], name = label,mode='lines'))
else:
self.Plotlydata.append(Scatter3d(x=x[:,0],y=y[:,0],z=z[:,i], name = label[i],mode='lines'))
else:
if len(label) > x.shape[-1]:
self.Plotlydata.append(Scatter3d(x=x[:,0],y=y[:,0],z=z[:,0], name = label,mode='lines'))
else:
self.Plotlydata.append(Scatter3d(x=x[:,0],y=y[:,0],z=z[:,0], name = label[i],mode='lines'))
except:
if len(label) > x.shape[-1]:
self.Plotlydata.append(Scatter3d(x=x[:,0],y=y[:,0],z=z[:,0], name = label,mode='lines'))
else:
self.Plotlydata.append(Scatter3d(x=x[:,0],y=y[:,0],z=z[:,0], name = label[i],mode='lines'))
# Append axis and plot titles
if self.ncol > 1:
self.PlotlySubPlotNumbers.append(plotnum)
elif self.nrow > 1 :
self.PlotlySubPlotNumbers.append(plotnum)
if edgeCol:
edcol = edgeCol
else:
edcol = self.nextPlotCol()
#scale the axes
if pltaxis is not None:
# ax.axis(pltaxis)
# if not xIsDate:
ax.set_xlim(pltaxis[0],pltaxis[1])
ax.set_ylim(pltaxis[2],pltaxis[3])
ax.set_zlim(pltaxis[4],pltaxis[5])
ax.view_init(azim=azim, elev=elev)
if xInvert:
ax.set_xlim(ax.get_xlim()[::-1])
if yInvert:
ax.set_ylim(ax.get_ylim()[::-1])
if zInvert:
ax.set_zlim(ax.get_zlim()[::-1])
if xlabel is not None:
ax.set_xlabel(xlabel, fontsize = xylabelfsize)
if ylabel is not None:
ax.set_ylabel(ylabel, fontsize = xylabelfsize)
if zlabel is not None:
ax.set_zlabel(zlabel, fontsize = xylabelfsize)
if label is not None:
leg = plt.legend(label, loc='best', fancybox=True)
leg.get_frame().set_alpha(legendAlpha)
self.bbox_extra_artists.append(leg)
if(ptitle is not None):
plt.title(ptitle, fontsize=titlefsize)
if self.useplotly:
if self.PLmultipleYAxis:
self.Plotlylayout.append(Layout(showlegend = True,title = ptitle,scene2=dict(camera=dict(up=dict(x=0,y=0,z=1)))))
elif self.PLmultipleXAxis:
self.Plotlylayout.append(Layout(showlegend = True,title = ptitle,scene2=dict(camera=dict(up=dict(x=0,y=0,z=1)))))
else:
self.Plotlylayout.append(Layout(showlegend = True,title = ptitle,scene2=dict(camera=dict(up=dict(x=0,y=0,z=1)))))
if self.ncol > 1:
self.PlotlySubPlotTitles.append(ptitle)
self.PlotlySubPlotLabels.append(label)
elif self.nrow > 1:
self.PlotlySubPlotTitles.append(ptitle)
self.PlotlySubPlotLabels.append(label)
return ax
############################################################
##
def polar3d(self, plotnum, theta, radial, zvals, ptitle=None,
xlabel=None, ylabel=None, zlabel=None, zscale=None,
titlefsize=12, xylabelfsize = 12,
thetaStride=1, radialstride=1, meshCmap = cm.rainbow,
linewidth=0.1, azim=45, elev=30, zorders=None, clip_on=True,
facecolors=None, alpha=1, edgeCol=None):
"""3D polar surface/mesh plot for (r, theta, zvals) input sets.
Given an existing figure, this function plots in a specified subplot position.
Only one mesh is drawn at a time. Future meshes in the same subplot
will cover any previous meshes.
The data in zvals must be on a grid where the theta vector corresponds to
the number of rows in zvals and the radial vector corresponds to the
number of columns in zvals.
The theta and radial vectors may have non-constant grid-intervals, i.e., they do not
have to be on regular intervals.
Args:
| plotnum (int): subplot number, 1-based index
| theta (np.array[N,M]): array of angular values [0..2pi] corresponding to (theta,rho) grid.
| radial (np.array[N,M]): array of radial values corresponding to (theta,rho) grid.
| zvals (np.array[N,M]): array of z values corresponding to (theta,rho) grid.
| ptitle (string): plot title (optional)
| xlabel (string): x-axis label (optional)
| ylabel (string): y-axis label (optional)
| zlabel (string): z-axis label (optional)
| zscale ([float]): z axis [min, max] in the plot.
| titlefsize (int): title font size, default 12pt (optional)
| xylabelfsize (int): x, y, z label font size, default 12pt (optional)
| thetaStride (int): theta stride in input data (optional)
| radialstride (int): radial stride in input data (optional)
| meshCmap (cm): color map for the mesh (optional)
| linewidth (float): width of the mesh lines
| azim (float): graph view azimuth angle [degrees] (optional)
| elev (float): graph view elevation angle [degrees] (optional)
| zorder ([int]) list of zorder for drawing sequence, highest is last (optional)
| clip_on (bool) clips objects to drawing axes (optional)
| facecolors ((np.array[N,M]): array of z value facecolours, corresponding to (theta,rho) grid.
| alpha (float): facecolour surface transparency (optional)
| edgeCol ([int]): list of colour specs, value at [0] used for edge colour (optional).
Returns:
| the axis object for the plot
Raises:
| No exception is raised.
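Example:
    A minimal usage sketch (assumptions: a Plotter instance ``p`` with a single
    subplot and numpy imported as ``np``; the zvals grid has one row per theta
    value and one column per radial value; values are illustrative only)::

        theta = np.linspace(0, 2*np.pi, 73)
        radial = np.linspace(0, 1, 25)
        Radial, Theta = np.meshgrid(radial, theta)
        zvals = (1 - Radial) * np.abs(np.cos(2*Theta))
        p.polar3d(1, theta, radial, zvals, zlabel='z')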
"""
# transform to cartesian system, using meshgrid
Radial,Theta = np.meshgrid(radial,theta)
X,Y = Radial*np.cos(Theta),Radial*np.sin(Theta)
#create subplot if not existing
if (self.nrow,self.ncol, plotnum) not in list(self.subplots.keys()):
self.subplots[(self.nrow,self.ncol, plotnum)] = \
self.fig.add_subplot(self.nrow,self.ncol, plotnum, projection='3d')
#get axis
ax = self.subplots[(self.nrow,self.ncol, plotnum)]
if zorders:
# only one surface is drawn per call, so use the first zorder entry
zorder = zorders[0]
else:
zorder = 2
if edgeCol:
edcol = edgeCol
else:
edcol = self.nextPlotCol()
#do the plot
if facecolors is not None:
ax.plot_surface(X, Y, zvals, rstride=thetaStride, cstride=radialstride,
linewidth=linewidth, cmap=meshCmap, zorder=zorder, clip_on=clip_on,
facecolors=facecolors, edgecolors=edcol, alpha=alpha)
else:
ax.plot_surface(X, Y, zvals, rstride=thetaStride, cstride=radialstride,
linewidth=linewidth, cmap=meshCmap, zorder=zorder, clip_on=clip_on,
alpha=alpha, edgecolors=edcol)
ax.view_init(azim=azim, elev=elev)
#label and clean up
if zscale is None:
ax.set_zlim3d(np.min(zvals), np.max(zvals))
else:
ax.set_zlim3d(zscale[0], zscale[1])
if xlabel is not None:
ax.set_xlabel(xlabel, fontsize = xylabelfsize)
if ylabel is not None:
ax.set_ylabel(ylabel, fontsize = xylabelfsize)
if zlabel is not None:
ax.set_zlabel(zlabel, fontsize = xylabelfsize)
if(ptitle is not None):
plt.title(ptitle, fontsize=titlefsize)
return ax
############################################################
##
def polarMesh(self, plotnum, theta, radial, zvals, ptitle=None, shading='flat',
radscale=None, titlefsize=12, meshCmap=cm.rainbow, cbarshow=False,
cbarorientation='vertical', cbarcustomticks=[], cbarfontsize=12,
rgrid=[0,5], thetagrid=[30], drawGrid=False,
thetagridfontsize=12, radialgridfontsize=12,
direction='counterclockwise', zerooffset=0, logScale=False,
plotCol=[], levels=10, contourFill=True, contourLine=True,
zeroContourLine=None, negativeSolid=False,
contLabel=False, contFmt='%.2f', contCol='k', contFonSz=8, contLinWid=0.5,
zorders=None, clip_on=True):
"""Polar colour contour and filled contour plot for (theta, r, zvals) input sets.
The data values must be given on a fixed mesh grid of three-dimensional (theta,rho,z)
array input sets (theta is angle, and rho is radial distance). The mesh grid is
defined in (theta,rho), while the height of the mesh is the z value. The
(theta,rho) arrays may have non-constant grid-intervals, i.e., they do not
have to be on regular intervals.
Given an existing figure, this function plots in a specified subplot position.
Only one contour plot is drawn at a time. Future contours in the same subplot
will cover any previous contours.
The data set must have three two dimensional arrays, each for theta, rho, and z.
The data in theta, rho, and z arrays must have matching data points.
The theta and rho arrays each define the grid in terms of theta and rho values,
i.e., the theta array contains the angular values for the data set, while the
rho array contains the radial values. The z array contains the z values for the
corresponding theta and rho values in the contour mesh.
Z-values can be plotted on a log scale, in which case the colourbar is adjusted
to show true values, but on the nonlinear scale.
The current version only saves png files, since there appears to be a problem
saving eps files.
Args:
| plotnum (int): subplot number, 1-based index
| theta (np.array[N,M]) array of angular values [0..2pi] corresponding to (theta,rho) grid.
| radial (np.array[N,M]) array of radial values corresponding to (theta,rho) grid.
| zvals (np.array[N,M]) array of z values corresponding to (theta,rho) grid.
| ptitle (string): plot title (optional)
| shading (string): 'flat' | 'gouraud' (optional)
| radscale ([float]): inner and outer radial scale max in the plot.
| titlefsize (int): title font size, default 12pt (optional)
| meshCmap (cm): color map for the mesh (optional)
| cbarshow (bool): if true, then show a color bar
| cbarorientation (string): 'vertical' (right) or 'horizontal' (below)
| cbarcustomticks zip([tick locations/float],[tick labels/string]): locations in image grey levels
| cbarfontsize (int): font size for color bar
| rgrid ([float]): radial grid - None, [number], [inc,max]
| thetagrid ([float]): angular grid - None, [inc]
| drawGrid (bool): draw the grid on the plot (optional)
| thetagridfontsize (float): font size for the angular grid
| radialgridfontsize (float): font size for the radial grid
| direction (string): 'counterclockwise' or 'clockwise' (optional)
| zerooffset (float): rotation offset where zero should be [rad] (optional)
| logScale (bool): do Z values on log scale, recompute colourbar vals
| plotCol ([strings]): plot colour and line style, list with M entries, use default if []
| levels (int or [float]): number of contour levels or a list of levels (optional)
| contourFill (bool): fill contours with colour (optional)
| contourLine (bool): draw a series of contour lines
| zeroContourLine (double): draw a contour at the stated value (optional)
| negativeSolid (bool): draw negative contours in solid lines, dashed otherwise (optional)
| contLabel (bool): label the contours with values (optional)
| contFmt (string): contour label c-printf format (optional)
| contCol (string): contour label colour, e.g., 'k' (optional)
| contFonSz (float): contour label fontsize (optional)
| contLinWid (float): contour line width in points (optional)
| zorder ([int]) list of zorder for drawing sequence, highest is last (optional)
| clip_on (bool) clips objects to drawing axes (optional)
Returns:
| the axis object for the plot
Raises:
| No exception is raised.
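Example:
    A minimal usage sketch (assumptions: a Plotter instance ``p`` with a single
    subplot and numpy imported as ``np``; theta, radial and zvals are
    matching-shape 2D grids; values are illustrative only)::

        th = np.linspace(0, 2*np.pi, 73)
        ra = np.linspace(0.1, 1, 25)
        Theta, Radial = np.meshgrid(th, ra)
        zvals = Radial * np.abs(np.cos(2*Theta))
        p.polarMesh(1, Theta, Radial, zvals, ptitle='polar mesh',
                    shading='gouraud', cbarshow=True)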
"""
# # transform to cartesian system, using meshgrid
# Radial,Theta = np.meshgrid(radial,theta)
# X,Y = Radial*np.cos(Theta),Radial*np.sin(Theta)
#if this is a log scale plot
if logScale is True:
zvals = np.log10(zvals)
contour_negative_linestyle = plt.rcParams['contour.negative_linestyle']
if contourLine:
if negativeSolid:
plt.rcParams['contour.negative_linestyle'] = 'solid'
else:
plt.rcParams['contour.negative_linestyle'] = 'dashed'
#create subplot if not existing
if (self.nrow,self.ncol, plotnum) not in list(self.subplots.keys()):
self.subplots[(self.nrow,self.ncol, plotnum)] = \
self.fig.add_subplot(self.nrow,self.ncol, plotnum, projection='polar')
#get axis
ax = self.subplots[(self.nrow,self.ncol, plotnum)]
if plotCol:
col = plotCol[0]
else:
col = self.nextPlotCol()
if zorders:
# only one contour/mesh set is drawn per call, so use the first zorder entry
zorder = zorders[0]
else:
zorder = 2
#do the plot
if contourLine:
pmplot = ax.contour(theta, radial, zvals, levels, cmap=None, linewidths=contLinWid,
colors=col, zorder=zorder, clip_on=clip_on)
if zeroContourLine:
pmplot = ax.contour(theta, radial, zvals, (zeroContourLine,), cmap=None, linewidths=contLinWid,
colors=col, zorder=zorder, clip_on=clip_on)
if contourFill:
pmplot = ax.pcolormesh(theta, radial, zvals, shading=shading, cmap=meshCmap,
zorder=zorder, clip_on=clip_on)
if contLabel:
plt.clabel(pmplot, fmt = contFmt, colors = contCol, fontsize=contFonSz)
ax.grid(drawGrid)
if(ptitle is not None):
plt.title(ptitle, fontsize=titlefsize)
#set up the grids
# add own labels: http://astrometry.net/svn/trunk/projects/masers/py/poster/plot_data.py
# http://matplotlib.org/devel/add_new_projection.html
if thetagrid is None:
plt.thetagrids([])
else:
plt.thetagrids(list(range(0, 360, thetagrid[0])))
plt.tick_params(axis='x', which='major', labelsize=thetagridfontsize)
# plt.thetagrids(radscale[0], radscale[1],5)
if radscale is None:
rscale = [np.min(radial), np.max(radial)]
else:
rscale = radscale
ax.set_ylim(rscale[0],rscale[1])
if rgrid is None:
ax.set_yticklabels([])
else :
#set the number of intervals
if rgrid[0] == 0:
ax.set_yticks(np.linspace(rscale[0],rscale[1],int(rgrid[1])))
#set the interval incremental size
if rgrid[0] != 0:
numrgrid = (rscale[1] - rscale[0] ) / (rgrid[0])
ax.set_yticks(np.linspace(rscale[0],rscale[1],int(numrgrid+1.000001)))
plt.tick_params(axis='y', which='major', labelsize=radialgridfontsize)
ax.set_theta_direction(direction)
ax.set_theta_offset(zerooffset)
if cbarshow is True:
#http://matplotlib.org/mpl_toolkits/axes_grid/users/overview.html#colorbar-whose-height-or-width-in-sync-with-the-master-axes
# this does not work with the polar projection, use gridspec to do this.
# divider = make_axes_locatable(ax)
# if cbarorientation == 'vertical':
# cax = divider.append_axes("right", size="5%", pad=0.05)
# else:
# cax = divider.append_axes("bottom", size="5%", pad=0.1)
if not cbarcustomticks:
cbar = self.fig.colorbar(pmplot,orientation=cbarorientation)
# cbar = self.fig.colorbar(pmplot,cax=cax)
if logScale:
cbartickvals = cbar.ax.yaxis.get_ticklabels()
tickVals = []
# need this roundabout trick to handle minus sign in unicode
for item in cbartickvals:
valstr = item.get_text().replace('\u2212', '-').replace('$','')
val = 10**float(valstr)
if abs(val) < 1000:
tickstr = '{0:f}'.format(val)
else:
tickstr = '{0:e}'.format(val)
tickVals.append(tickstr)
cbartickvals = cbar.ax.yaxis.set_ticklabels(tickVals)
else:
ticks, ticklabels = list(zip(*cbarcustomticks))
cbar = self.fig.colorbar(pmplot,ticks=ticks, orientation=cbarorientation)
# cbar = self.fig.colorbar(pmplot,ticks=ticks, cax=cax)
if cbarorientation == 'vertical':
cbar.ax.set_yticklabels(ticklabels)
else:
cbar.ax.set_xticklabels(ticklabels)
if cbarorientation == 'vertical':
for t in cbar.ax.get_yticklabels():
t.set_fontsize(cbarfontsize)
else:
for t in cbar.ax.get_xticklabels():
t.set_fontsize(cbarfontsize)
plt.rcParams['contour.negative_linestyle'] = contour_negative_linestyle
return ax
############################################################
##
def plotArray(self, plotnum, inarray, slicedim = 0, labels = None,
maxNX=0, maxNY=0, titlefsize = 8, xylabelfsize = 8,
xytickfsize = 8, selectCols=None, sepSpace=0.2,
allPlotCol='r' ):
"""Creates a plot from an input array.
Given an input array with m x n dimensions, this function creates a subplot for vectors
[1-n]. Vector 0 serves as the x-axis for each subplot. The slice dimension can be in
columns (0) or rows (1).
Args:
| plotnum (int): The subplot number, 1-based index, according to Matplotlib conventions.
This value must always be given, even if only a single 1,1 subplot is used.
| inarray (np.array): data series to be plotted. Data direction can be cols or rows.
The abscissa (x axis) values must be the first col/row, with ordinates in following cols/rows.
| slicedim (int): slice along columns (0) or rows (1) (optional).
| labels (list): a list of strings as labels for each subplot.
x=labels[0], y=labels[1:] (optional).
| maxNX (int): draw maxNX+1 tick labels on x axis (optional).
| maxNY (int): draw maxNY+1 tick labels on y axis (optional).
| titlefsize (int): title font size, default 8pt (optional).
| xylabelfsize (int): x-axis, y-axis label font size, default 8pt (optional).
| xytickfsize (int): x-axis, y-axis tick font size, default 8pt (optional).
| selectCols ([int]): select columns for plot. Col 0 corresponds to col 1 in input data
(because col 0 is abscissa),plot all if not given (optional).
| sepSpace (float): vertical spacing between sub-plots in inches (optional).
| allPlotCol (str): make all plot lines this colour (optional).
Returns:
| Nothing
Raises:
| No exception is raised.
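Example:
    A minimal usage sketch (assumptions: a Plotter instance ``p`` and numpy
    imported as ``np``; column 0 is the abscissa, the remaining columns are
    drawn in stacked subplots; values are illustrative only)::

        x = np.linspace(0, 10, 100).reshape(-1, 1)
        data = np.hstack((x, np.sin(x), np.cos(x)))
        p.plotArray(1, data, labels=['time', 'sin', 'cos'])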
"""
#prepare the data
#if slicedim = 0, slice across columns
if slicedim == 0:
pass
elif slicedim == 1:
inarray = inarray.T
x = inarray[:,0]
yAll = inarray[:,1:].transpose()
nestnrow = inarray.shape[1]-1
nestncol = 1
xlabel = labels[0]
ylabels = labels[1:]
## keep track of whether the outer grid was already defined.
#use current subplot number as outer grid reference
ogkey = (self.nrow, self.ncol)
if ogkey not in list(self.gridSpecsOuter.keys()):
self.gridSpecsOuter[ogkey] = \
gridspec.GridSpec(self.nrow,self.ncol, wspace=0, hspace=0)
outer_grid = self.gridSpecsOuter[ogkey]
## keep track of whether the inner grid was already defined.
#inner_grid (nested):
igkey = (self.nrow, self.ncol, plotnum)
if igkey not in list(self.gridSpecsInner.keys()):
self.gridSpecsInner[igkey] = \
gridspec.GridSpecFromSubplotSpec(nestnrow,nestncol,
subplot_spec=outer_grid[plotnum-1],wspace=0, hspace=sepSpace)
inner_grid = self.gridSpecsInner[igkey]
#set up list of all cols if required
if not selectCols:
selectCols = list(range(yAll.shape[0]))
nestplotnum = 0
#create subplot for each y-axis vector
numplots = len(ylabels)
for index,y in enumerate(yAll):
if index in selectCols:
## if this row of array plot in key, else create
rkey = (self.nrow, self.ncol, plotnum, nestplotnum)
if rkey not in list(self.arrayRows.keys()):
self.arrayRows[rkey] = \
plt.Subplot(self.fig, inner_grid[nestplotnum])
self.fig.add_subplot(self.arrayRows[rkey])
nestplotnum = nestplotnum + 1
ax = self.arrayRows[rkey]
# plot the data
if self.useplotly:#Plotly subplot configuration
self.nrow = nestnrow
self.ncol = nestncol
self.plot(plotnum,x,y,allPlotCol)
else:
line, = ax.plot(x,y,allPlotCol)
if ylabels is not None:
doYAxis = False
if doYAxis:
# place on y-axis
ax.set_ylabel(ylabels[index], fontsize=xylabelfsize)
else:
# place as legend
line.set_label(ylabels[index])
leg = ax.legend( loc='best', fancybox=True,fontsize=8)
leg.get_frame().set_alpha(0.1)
# ax.legend()
self.bbox_extra_artists.append(leg)
###align ylabels
### ax.yaxis.set_label_coords(-0.05, 0.5)
#tick label fonts
for tick in ax.yaxis.get_major_ticks():
tick.label.set_fontsize(xytickfsize)
for tick in ax.xaxis.get_major_ticks():
tick.label.set_fontsize(xytickfsize)
if maxNX > 0:
ax.xaxis.set_major_locator(mpl.ticker.MaxNLocator(maxNX))
if maxNY > 0:
ax.yaxis.set_major_locator(mpl.ticker.MaxNLocator(maxNY))
#share x ticklabels and label to avoid clutter and overlapping
plt.setp([a.get_xticklabels() for a in self.fig.axes[:-1]], visible=False)
if xlabel is not None and index==numplots-1:
self.fig.axes[-1].set_xlabel(xlabel, fontsize=xylabelfsize)
# minor ticks are two points smaller than major
# ax.tick_params(axis='both', which='major', labelsize=xytickfsize)
# ax.tick_params(axis='both', which='minor', labelsize=xytickfsize-2)
############################################################
##
def setup_pie_axes(self,fig, rect, thetaAxis, radiusAxis,radLabel='',angLabel='',numAngGrid=5,
numRadGrid=10,drawGrid=True, degreeformatter="%d$^\circ$"):
"""Sets up the axes_grid for the pie plot, not using regulat Matplotlib axes.
http://matplotlib.org/mpl_toolkits/axes_grid/users/overview.html
http://matplotlib.org/mpl_toolkits/axes_grid/api/axis_artist_api.html
http://matplotlib.org/mpl_toolkits/axes_grid/users/axisartist.html
http://matplotlib.org/examples/axes_grid/demo_floating_axes.html
https://fossies.org/dox/matplotlib-1.5.3/classmpl__toolkits_1_1axisartist_1_1angle__helper_1_1FormatterDMS.html
Args:
| fig (matplotlib figure): which figure to use
| rect (matplotlib subaxis): which subplot to use
| thetaAxis ([float]): [min,max] for angular scale
| radiusAxis ([float]): [min,max] for radial scale
| radLabel (str): radial label
| angLabel (str): angular label
| numAngGrid (int): number of ticks on angular grid
| numRadGrid (int): number of ticks on radial grid
| drawGrid (bool): must grid be drawn?
| degreeformatter (str): format string for angular tick labels
Returns:
| the axes and parasitic axes object for the plot
Raises:
| No exception is raised.
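Example:
    A minimal usage sketch (assumptions: a Plotter instance ``p`` with a single
    subplot and numpy imported as ``np``; the rect string follows the
    nrow/ncol/plotnum convention used by ``pie``; values are illustrative only)::

        ax, aux_ax = p.setup_pie_axes(p.fig, '111', [0, 90], [0.5, 1.0],
                                      radLabel='range', angLabel='azimuth')
        aux_ax.plot(np.linspace(0, 90, 50), np.linspace(0.5, 1.0, 50))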
"""
# PolarAxes.PolarTransform takes radians. However, we want our coordinate
# system in degrees
tr = Affine2D().scale(np.pi/180., 1.) + PolarAxes.PolarTransform()
# Find grid values appropriate for the coordinate (degree).
# The argument is an approximate number of grids.
grid_locator1 = angle_helper.LocatorD(numAngGrid)
# And also use an appropriate formatter:
tick_formatter1 = angle_helper.FormatterDMS()
tick_formatter1.fmt_d = degreeformatter
# set up number of ticks for the r-axis
grid_locator2 = MaxNLocator(numRadGrid)
# the extremes are passed to the function
grid_helper = floating_axes.GridHelperCurveLinear(tr,
extremes=(thetaAxis[0], thetaAxis[1], radiusAxis[0], radiusAxis[1]),
grid_locator1=grid_locator1,
grid_locator2=grid_locator2,
tick_formatter1=tick_formatter1,
tick_formatter2=None,
)
ax1 = floating_axes.FloatingSubplot(fig, rect, grid_helper=grid_helper)
fig.add_subplot(ax1)
# create a parasite axes
aux_ax = ax1.get_aux_axes(tr)
aux_ax.patch = ax1.patch # for aux_ax to have a clip path as in ax
ax1.patch.zorder=0.9 # but this has a side effect that the patch is
# drawn twice, and possibly over some other
# artists. So, we decrease the zorder a bit to
# prevent this.
return ax1, aux_ax
############################################################
##
def pie(self, plotnum,theta,radius, ptitle=None,angLabel='',radLabel='',
thetaAxis=[0,360.],radiusAxis=[0,1],plotCol=[],
linewidths=None, label=[], legendAlpha=0.0,
legendLoc='best',
linestyle=None,
titlefsize = 12,
numAngGrid=5, numRadGrid=10,
labelfsize=10, drawGrid=True,
markers=[],markevery=None,
radangfsize = 12,
xytickfsize = 10,
zorders=None, clip_on=True,
degreeformatter="%d$^\circ$" ):
"""Plots data in pie section on a polar grid.
Args:
| plotnum (int): subplot number, 1-based index
| theta (np.array[N,] or [N,M]): angular data set in degrees - could be M columns
| radius (np.array[N,] or [N,M]): radial data set - could be M columns
| ptitle (string): plot title (optional)
| angLabel (string): angular axis label (optional)
| radLabel (string): radial axis label (optional)
| thetaAxis ([minAngle, maxAngle]): the angular extent to be displayed, degrees (optional)
| radiusAxis ([minRad, maxRad]): the radial extent to be displayed (optional)
| plotCol ([strings]): plot colour and line style, list with M entries, use default if [] (optional)
| linewidths ([float]): plot line width in points, list with M entries, use default if None (optional)
| label ([strings]): legend label for ordinate, list with M entries
| legendAlpha (float): transparency for legend box
| legendLoc (string): location for legend box (optional)
| linestyle (string): linestyle for this plot (optional)
| titlefsize (int): title font size, default 12pt (optional)
| numAngGrid (int): number of grid or tick marks along angular extent
| numRadGrid (int): number of grid or tick marks along radial extent
| labelfsize (int): label/legend font size, default 10pt (optional)
| drawGrid (bool): draw the grid on the plot (optional)
| markers ([string]) markers to be used for plotting data points (optional)
| markevery (int | (startind, stride)) subsample when using markers (optional)
| radangfsize (int): x-axis, y-axis label font size, default 12pt (optional)
| xytickfsize (int): x-axis, y-axis tick font size, default 10pt (optional)
| zorders ([int]) list of zorder for drawing sequence, highest is last (optional)
| clip_on (bool) clips objects to drawing axes (optional)
| degreeformatter (str): format string to define the angular tick labels (optional)
Returns:
| the axis object for the plot
Raises:
| No exception is raised.
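Example:
    A minimal usage sketch (assumptions: a Plotter instance ``p`` with a single
    subplot and numpy imported as ``np``; angles are in degrees; values are
    illustrative only)::

        ang = np.linspace(0, 90, 100)
        rad = 0.5 + 0.5 * ang / 90.
        p.pie(1, ang, rad, ptitle='pie sector', angLabel='azimuth deg',
              radLabel='range', thetaAxis=[0, 90], radiusAxis=[0, 1])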
"""
pkey = (self.nrow, self.ncol, plotnum)
if pkey not in list(self.subplots.keys()):
ax, aux_ax1 = self.setup_pie_axes(self.fig, '{}{}{}'.format(*pkey),
thetaAxis, radiusAxis,
radLabel=radLabel,angLabel=angLabel,numAngGrid=numAngGrid,
numRadGrid=numRadGrid,drawGrid=drawGrid,degreeformatter=degreeformatter)
self.subplots[pkey] = (ax,aux_ax1)
else:
(ax,aux_ax1) = self.subplots[pkey]
# reshape input dataset into rank 2
xx = theta if theta.ndim>1 else theta.reshape(-1, 1)
yy = radius if radius.ndim>1 else radius.reshape(-1, 1)
ax.grid(drawGrid)
for i in range(yy.shape[1]):
#set up the line style, either given or next in sequence
mmrk = ''
if markers:
mmrk = markers[-1] if i >= len(markers) else markers[i]
if plotCol:
if i >= len(plotCol):
col = plotCol[-1]
else:
col = plotCol[i]
else:
col = self.nextPlotCol()
if linestyle is None:
linestyleL = '-'
else:
if type(linestyle) == type([1]):
linestyleL = linestyle[i]
else:
linestyleL = linestyle
if zorders:
if len(zorders) > 1:
zorder = zorders[i]
else:
zorder = zorders[0]
else:
zorder = 2
if not label:
if linewidths is not None:
line = aux_ax1.plot(xx[:, i], yy[:, i], col, label=None, linestyle=linestyleL,
marker=mmrk, markevery=markevery, linewidth=linewidths[i],
clip_on=clip_on, zorder=zorder)
else:
line = aux_ax1.plot(xx[:, i], yy[:, i], col, label=None, linestyle=linestyleL,
marker=mmrk, markevery=markevery,
clip_on=clip_on, zorder=zorder)
line = line[0]
else:
if linewidths is not None:
line = aux_ax1.plot(xx[:, i],yy[:,i],col,#label=label[i],
linestyle=linestyleL,
marker=mmrk, markevery=markevery, linewidth=linewidths[i],
clip_on=clip_on, zorder=zorder)
else:
line = aux_ax1.plot(xx[:, i],yy[:,i],col,#label=label[i],
linestyle=linestyleL,
marker=mmrk, markevery=markevery,
clip_on=clip_on, zorder=zorder)
line = line[0]
line.set_label(label[i])
leg = aux_ax1.legend( loc=legendLoc, fancybox=True,fontsize=labelfsize)
leg.get_frame().set_alpha(legendAlpha)
# aux_ax1.legend()
self.bbox_extra_artists.append(leg)
if(ptitle is not None):
ax.set_title(ptitle, fontsize=titlefsize)
# adjust axis
# the axis artist lets you call axis with
# "bottom", "top", "left", "right"
# radial axis scale are the left/right of the graph
# draw labels outside the graph
if thetaAxis[0] > 90 and thetaAxis[0] < 270:
ax.axis["left"].set_visible(False)
ax.axis["right"].set_visible(True)
ax.axis["right"].set_axis_direction("top")
ax.axis["right"].toggle(ticklabels=True, label=True)
ax.axis["right"].major_ticklabels.set_axis_direction("bottom")
ax.axis["right"].label.set_axis_direction("bottom")
#set radial label
ax.axis["right"].label.set_text(radLabel)
ax.axis["right"].label.set_size(radangfsize)
aux_ax1.plot(np.array([thetaAxis[0],thetaAxis[0]]),
np.array([radiusAxis[0],radiusAxis[1]]),'k',linewidth=2.5)
ax.axis["right"].major_ticklabels.set_size(xytickfsize)
ax.axis["right"].minor_ticklabels.set_size(xytickfsize-2)
else:
# ax.axis["right"].set_visible(False)
ax.axis["left"].set_axis_direction("bottom")
# ax.axis["right"].set_axis_direction("top")
#set radial label
ax.axis["left"].label.set_text(radLabel)
ax.axis["left"].label.set_size(radangfsize)
ax.axis["left"].major_ticklabels.set_size(xytickfsize)
ax.axis["left"].minor_ticklabels.set_size(xytickfsize-2)
# angular axis scale are top / bottom
ax.axis["bottom"].set_visible(False)
ax.axis["top"].set_axis_direction("bottom")
ax.axis["top"].toggle(ticklabels=True, label=True)
ax.axis["top"].major_ticklabels.set_axis_direction("top")
ax.axis["top"].label.set_axis_direction("top")
#set angular label
ax.axis["top"].label.set_text(angLabel)
ax.axis["top"].label.set_size(radangfsize)
ax.axis["top"].major_ticklabels.set_size(xytickfsize)
ax.axis["top"].minor_ticklabels.set_size(xytickfsize-2)
# draw the inner grid boundary, somehow not done by matplotlib
# if radiusAxis[0] > 0.:
numqi = 20
thqi = np.linspace(thetaAxis[0], thetaAxis[1],numqi)
raqi = np.linspace(radiusAxis[0],radiusAxis[0],numqi)
aux_ax1.plot(thqi,raqi,'k',linewidth=2.5)
return aux_ax1
################################################################
################################################################
##
## plot graphs and confirm the correctness of the functions
from contextlib import contextmanager
@contextmanager
def savePlot(fignumber=0,subpltnrow=1,subpltncol=1,
figuretitle=None, figsize=(9,9), saveName=None):
"""Uses 'with' statement to create a plot and save to file on exit.
Use as follows::
x=np.linspace(-3,3,20)
with savePlot(1,saveName=['testwith.png','testwith.eps']) as p:
p.plot(1,x,x*x)
where the savePlot parameters are exactly the same as ``Plotter``,
except that a new named parameter ``saveName`` is now present.
If ``saveName`` is not ``None``, the list of filenames is used to
save files of the plot (any number of names/types)
Args:
| fignumber (int): the plt figure number, must be supplied
| subpltnrow (int): subplot number of rows
| subpltncol (int): subplot number of columns
| figuretitle (string): the overall heading for the figure
| figsize ((w,h)): the figure size in inches
| saveName str or [str]: string or list of save filenames
Returns:
| The plotting object, used to populate the plot (see example)
Raises:
| No exception is raised.
"""
p = Plotter(fignumber,subpltnrow,subpltncol, figuretitle, figsize)
try:
yield p
finally:
if saveName is not None:
if isinstance(saveName, str):
p.saveFig(filename=saveName)
else:
for fname in saveName:
p.saveFig(filename=fname)
################################################################
##
def cubehelixcmap(start=0.5, rot=-1.5, gamma=1.0, hue=1.2, reverse=False, nlev=256.):
"""
A full implementation of <NAME>'s "cubehelix" for Matplotlib.
Based on the FORTRAN 77 code provided in
<NAME>, 2011, BASI, 39, 289.
http://adsabs.harvard.edu/abs/2011arXiv1108.5083G
http://www.astron-soc.in/bulletin/11June/289392011.pdf
User can adjust all parameters of the cubehelix algorithm.
This enables much greater flexibility in choosing color maps, while
always ensuring the color map scales in intensity from black
to white. A few simple examples:
Default color map settings produce the standard "cubehelix".
Create color map in only blues by setting rot=0 and start=0.
Create reverse (white to black) backwards through the rainbow once
by setting rot=1 and reverse=True.
Args:
| start : scalar, optional
| Sets the starting position in the color space. 0=blue, 1=red,
| 2=green. Defaults to 0.5.
| rot : scalar, optional
| The number of rotations through the rainbow. Can be positive
| or negative, indicating direction of rainbow. Negative values
| correspond to Blue->Red direction. Defaults to -1.5
| gamma : scalar, optional
| The gamma correction for intensity. Defaults to 1.0
| hue : scalar, optional
| The hue intensity factor. Defaults to 1.2
| reverse : boolean, optional
| Set to True to reverse the color map. Will go from black to
| white. Good for density plots where shade~density. Defaults to False
| nlev : scalar, optional
| Defines the number of discrete levels to render colors at.
| Defaults to 256.
Returns:
| matplotlib.colors.LinearSegmentedColormap object
Example:
>>> import cubehelix
>>> cx = cubehelix.cmap(start=0., rot=-0.5)
>>> plot(x,cmap=cx)
Revisions
2014-04 (@jradavenport) Ported from IDL version
source
https://github.com/jradavenport/cubehelix
Licence
Copyright (c) 2014, <NAME> and contributors All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list of
conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
#-- set up the parameters
fract = np.arange(nlev)/(nlev-1.)
angle = 2.0*np.pi * (start/3.0 + 1.0 + rot*fract)
fract = fract**gamma
amp = hue*fract*(1.-fract)/2.
#-- compute the RGB vectors according to main equations
red = fract+amp*(-0.14861*np.cos(angle)+1.78277*np.sin(angle))
grn = fract+amp*(-0.29227*np.cos(angle)-0.90649*np.sin(angle))
blu = fract+amp*(1.97294*np.cos(angle))
#-- find where RGB are outside the range [0,1], clip
red[np.where((red > 1.))] = 1.
grn[np.where((grn > 1.))] = 1.
blu[np.where((blu > 1.))] = 1.
red[np.where((red < 0.))] = 0.
grn[np.where((grn < 0.))] = 0.
blu[np.where((blu < 0.))] = 0.
#-- optional color reverse
if reverse:
red = red[::-1]
blu = blu[::-1]
grn = grn[::-1]
#-- put in to tuple & dictionary structures needed
rr = []
bb = []
gg = []
for k in range(0,int(nlev)):
rr.append((float(k)/(nlev-1.), red[k], red[k]))
bb.append((float(k)/(nlev-1.), blu[k], blu[k]))
gg.append((float(k)/(nlev-1.), grn[k], grn[k]))
cdict = {'red':rr, 'blue':bb, 'green':gg}
return LSC('cubehelix_map',cdict)
################################################################
################################################################
"""
Turbo, An Improved Rainbow Colormap for Visualization
https://ai.googleblog.com/2019/08/turbo-improved-rainbow-colormap-for.html
<NAME>
One of the most commonly used color mapping algorithms in computer vision applications
is Jet, which is high contrast, making it useful for accentuating even weakly
distinguished image features. However, if you look at the color map gradient,
one can see distinct “bands” of color, most notably in the cyan and yellow regions.
This causes sharp transitions when the map is applied to images, which are misleading
when the underlying data is actually smoothly varying. Because the rate at which the
color changes ‘perceptually’ is not constant, Jet is not perceptually uniform.
Today we are happy to introduce Turbo, a new colormap that has the desirable
properties of Jet while also addressing some of its shortcomings, such as false detail,
banding and color blindness ambiguity.
https://gist.github.com/mikhailov-work/ee72ba4191942acecc03fe6da94fc73f
https://gist.githubusercontent.com/FedeMiorelli/640bbc66b2038a14802729e609abfe89/raw/c84943cb48ca7d7d90e2b882ea46e07613dcfe13/turbo_colormap_mpl.py
"""
turbo_colormap_data = np.array(
[[0.18995,0.07176,0.23217],
[0.19483,0.08339,0.26149],
[0.19956,0.09498,0.29024],
[0.20415,0.10652,0.31844],
[0.20860,0.11802,0.34607],
[0.21291,0.12947,0.37314],
[0.21708,0.14087,0.39964],
[0.22111,0.15223,0.42558],
[0.22500,0.16354,0.45096],
[0.22875,0.17481,0.47578],
[0.23236,0.18603,0.50004],
[0.23582,0.19720,0.52373],
[0.23915,0.20833,0.54686],
[0.24234,0.21941,0.56942],
[0.24539,0.23044,0.59142],
[0.24830,0.24143,0.61286],
[0.25107,0.25237,0.63374],
[0.25369,0.26327,0.65406],
[0.25618,0.27412,0.67381],
[0.25853,0.28492,0.69300],
[0.26074,0.29568,0.71162],
[0.26280,0.30639,0.72968],
[0.26473,0.31706,0.74718],
[0.26652,0.32768,0.76412],
[0.26816,0.33825,0.78050],
[0.26967,0.34878,0.79631],
[0.27103,0.35926,0.81156],
[0.27226,0.36970,0.82624],
[0.27334,0.38008,0.84037],
[0.27429,0.39043,0.85393],
[0.27509,0.40072,0.86692],
[0.27576,0.41097,0.87936],
[0.27628,0.42118,0.89123],
[0.27667,0.43134,0.90254],
[0.27691,0.44145,0.91328],
[0.27701,0.45152,0.92347],
[0.27698,0.46153,0.93309],
[0.27680,0.47151,0.94214],
[0.27648,0.48144,0.95064],
[0.27603,0.49132,0.95857],
[0.27543,0.50115,0.96594],
[0.27469,0.51094,0.97275],
[0.27381,0.52069,0.97899],
[0.27273,0.53040,0.98461],
[0.27106,0.54015,0.98930],
[0.26878,0.54995,0.99303],
[0.26592,0.55979,0.99583],
[0.26252,0.56967,0.99773],
[0.25862,0.57958,0.99876],
[0.25425,0.58950,0.99896],
[0.24946,0.59943,0.99835],
[0.24427,0.60937,0.99697],
[0.23874,0.61931,0.99485],
[0.23288,0.62923,0.99202],
[0.22676,0.63913,0.98851],
[0.22039,0.64901,0.98436],
[0.21382,0.65886,0.97959],
[0.20708,0.66866,0.97423],
[0.20021,0.67842,0.96833],
[0.19326,0.68812,0.96190],
[0.18625,0.69775,0.95498],
[0.17923,0.70732,0.94761],
[0.17223,0.71680,0.93981],
[0.16529,0.72620,0.93161],
[0.15844,0.73551,0.92305],
[0.15173,0.74472,0.91416],
[0.14519,0.75381,0.90496],
[0.13886,0.76279,0.89550],
[0.13278,0.77165,0.88580],
[0.12698,0.78037,0.87590],
[0.12151,0.78896,0.86581],
[0.11639,0.79740,0.85559],
[0.11167,0.80569,0.84525],
[0.10738,0.81381,0.83484],
[0.10357,0.82177,0.82437],
[0.10026,0.82955,0.81389],
[0.09750,0.83714,0.80342],
[0.09532,0.84455,0.79299],
[0.09377,0.85175,0.78264],
[0.09287,0.85875,0.77240],
[0.09267,0.86554,0.76230],
[0.09320,0.87211,0.75237],
[0.09451,0.87844,0.74265],
[0.09662,0.88454,0.73316],
[0.09958,0.89040,0.72393],
[0.10342,0.89600,0.71500],
[0.10815,0.90142,0.70599],
[0.11374,0.90673,0.69651],
[0.12014,0.91193,0.68660],
[0.12733,0.91701,0.67627],
[0.13526,0.92197,0.66556],
[0.14391,0.92680,0.65448],
[0.15323,0.93151,0.64308],
[0.16319,0.93609,0.63137],
[0.17377,0.94053,0.61938],
[0.18491,0.94484,0.60713],
[0.19659,0.94901,0.59466],
[0.20877,0.95304,0.58199],
[0.22142,0.95692,0.56914],
[0.23449,0.96065,0.55614],
[0.24797,0.96423,0.54303],
[0.26180,0.96765,0.52981],
[0.27597,0.97092,0.51653],
[0.29042,0.97403,0.50321],
[0.30513,0.97697,0.48987],
[0.32006,0.97974,0.47654],
[0.33517,0.98234,0.46325],
[0.35043,0.98477,0.45002],
[0.36581,0.98702,0.43688],
[0.38127,0.98909,0.42386],
[0.39678,0.99098,0.41098],
[0.41229,0.99268,0.39826],
[0.42778,0.99419,0.38575],
[0.44321,0.99551,0.37345],
[0.45854,0.99663,0.36140],
[0.47375,0.99755,0.34963],
[0.48879,0.99828,0.33816],
[0.50362,0.99879,0.32701],
[0.51822,0.99910,0.31622],
[0.53255,0.99919,0.30581],
[0.54658,0.99907,0.29581],
[0.56026,0.99873,0.28623],
[0.57357,0.99817,0.27712],
[0.58646,0.99739,0.26849],
[0.59891,0.99638,0.26038],
[0.61088,0.99514,0.25280],
[0.62233,0.99366,0.24579],
[0.63323,0.99195,0.23937],
[0.64362,0.98999,0.23356],
[0.65394,0.98775,0.22835],
[0.66428,0.98524,0.22370],
[0.67462,0.98246,0.21960],
[0.68494,0.97941,0.21602],
[0.69525,0.97610,0.21294],
[0.70553,0.97255,0.21032],
[0.71577,0.96875,0.20815],
[0.72596,0.96470,0.20640],
[0.73610,0.96043,0.20504],
[0.74617,0.95593,0.20406],
[0.75617,0.95121,0.20343],
[0.76608,0.94627,0.20311],
[0.77591,0.94113,0.20310],
[0.78563,0.93579,0.20336],
[0.79524,0.93025,0.20386],
[0.80473,0.92452,0.20459],
[0.81410,0.91861,0.20552],
[0.82333,0.91253,0.20663],
[0.83241,0.90627,0.20788],
[0.84133,0.89986,0.20926],
[0.85010,0.89328,0.21074],
[0.85868,0.88655,0.21230],
[0.86709,0.87968,0.21391],
[0.87530,0.87267,0.21555],
[0.88331,0.86553,0.21719],
[0.89112,0.85826,0.21880],
[0.89870,0.85087,0.22038],
[0.90605,0.84337,0.22188],
[0.91317,0.83576,0.22328],
[0.92004,0.82806,0.22456],
[0.92666,0.82025,0.22570],
[0.93301,0.81236,0.22667],
[0.93909,0.80439,0.22744],
[0.94489,0.79634,0.22800],
[0.95039,0.78823,0.22831],
[0.95560,0.78005,0.22836],
[0.96049,0.77181,0.22811],
[0.96507,0.76352,0.22754],
[0.96931,0.75519,0.22663],
[0.97323,0.74682,0.22536],
[0.97679,0.73842,0.22369],
[0.98000,0.73000,0.22161],
[0.98289,0.72140,0.21918],
[0.98549,0.71250,0.21650],
[0.98781,0.70330,0.21358],
[0.98986,0.69382,0.21043],
[0.99163,0.68408,0.20706],
[0.99314,0.67408,0.20348],
[0.99438,0.66386,0.19971],
[0.99535,0.65341,0.19577],
[0.99607,0.64277,0.19165],
[0.99654,0.63193,0.18738],
[0.99675,0.62093,0.18297],
[0.99672,0.60977,0.17842],
[0.99644,0.59846,0.17376],
[0.99593,0.58703,0.16899],
[0.99517,0.57549,0.16412],
[0.99419,0.56386,0.15918],
[0.99297,0.55214,0.15417],
[0.99153,0.54036,0.14910],
[0.98987,0.52854,0.14398],
[0.98799,0.51667,0.13883],
[0.98590,0.50479,0.13367],
[0.98360,0.49291,0.12849],
[0.98108,0.48104,0.12332],
[0.97837,0.46920,0.11817],
[0.97545,0.45740,0.11305],
[0.97234,0.44565,0.10797],
[0.96904,0.43399,0.10294],
[0.96555,0.42241,0.09798],
[0.96187,0.41093,0.09310],
[0.95801,0.39958,0.08831],
[0.95398,0.38836,0.08362],
[0.94977,0.37729,0.07905],
[0.94538,0.36638,0.07461],
[0.94084,0.35566,0.07031],
[0.93612,0.34513,0.06616],
[0.93125,0.33482,0.06218],
[0.92623,0.32473,0.05837],
[0.92105,0.31489,0.05475],
[0.91572,0.30530,0.05134],
[0.91024,0.29599,0.04814],
[0.90463,0.28696,0.04516],
[0.89888,0.27824,0.04243],
[0.89298,0.26981,0.03993],
[0.88691,0.26152,0.03753],
[0.88066,0.25334,0.03521],
[0.87422,0.24526,0.03297],
[0.86760,0.23730,0.03082],
[0.86079,0.22945,0.02875],
[0.85380,0.22170,0.02677],
[0.84662,0.21407,0.02487],
[0.83926,0.20654,0.02305],
[0.83172,0.19912,0.02131],
[0.82399,0.19182,0.01966],
[0.81608,0.18462,0.01809],
[0.80799,0.17753,0.01660],
[0.79971,0.17055,0.01520],
[0.79125,0.16368,0.01387],
[0.78260,0.15693,0.01264],
[0.77377,0.15028,0.01148],
[0.76476,0.14374,0.01041],
[0.75556,0.13731,0.00942],
[0.74617,0.13098,0.00851],
[0.73661,0.12477,0.00769],
[0.72686,0.11867,0.00695],
[0.71692,0.11268,0.00629],
[0.70680,0.10680,0.00571],
[0.69650,0.10102,0.00522],
[0.68602,0.09536,0.00481],
[0.67535,0.08980,0.00449],
[0.66449,0.08436,0.00424],
[0.65345,0.07902,0.00408],
[0.64223,0.07380,0.00401],
[0.63082,0.06868,0.00401],
[0.61923,0.06367,0.00410],
[0.60746,0.05878,0.00427],
[0.59550,0.05399,0.00453],
[0.58336,0.04931,0.00486],
[0.57103,0.04474,0.00529],
[0.55852,0.04028,0.00579],
[0.54583,0.03593,0.00638],
[0.53295,0.03169,0.00705],
[0.51989,0.02756,0.00780],
[0.50664,0.02354,0.00863],
[0.49321,0.01963,0.00955],
[0.47960,0.01583,0.01055]])
def RGBToPyCmap(rgbdata):
nsteps = rgbdata.shape[0]
stepaxis = np.linspace(0, 1, nsteps)
rdata=[]; gdata=[]; bdata=[]
for istep in range(nsteps):
r = rgbdata[istep,0]
g = rgbdata[istep,1]
b = rgbdata[istep,2]
rdata.append((stepaxis[istep], r, r))
gdata.append((stepaxis[istep], g, g))
bdata.append((stepaxis[istep], b, b))
mpl_data = {'red': rdata,
'green': gdata,
'blue': bdata}
return mpl_data
# register turbo as a matplotlib colourmap for matplotlib versions below 3.3
if tuple(int(v) for v in mpl.__version__.split('.')[:2]) < (3, 3):
mpl_data = RGBToPyCmap(turbo_colormap_data)
# plt.register_cmap(name='turbo', data=mpl_data, lut=turbo_colormap_data.shape[0])
# plt.register_cmap(cmap=LSC(name='turbo', data=mpl_data, lut=turbo_colormap_data.shape[0]))
plt.register_cmap(cmap=LSC('turbo', mpl_data, turbo_colormap_data.shape[0]))
mpl_data = RGBToPyCmap(np.flipud(turbo_colormap_data))
# plt.register_cmap(name='iturbo', data=mpl_data, lut=turbo_colormap_data.shape[0])
# plt.register_cmap(cmap=LSC(name='iturbo', data=mpl_data, lut=turbo_colormap_data.shape[0]))
plt.register_cmap(cmap=LSC('iturbo', mpl_data, turbo_colormap_data.shape[0]))
# usage:
# plt.imshow(ZZ, cmap='turbo')
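# A slightly fuller sketch (note: the registration above only runs for matplotlib < 3.3;
# newer matplotlib ships 'turbo' natively, while the reversed 'iturbo' is only added here):
#   ZZ = np.outer(np.linspace(0, 1, 64), np.linspace(0, 1, 64))
#   plt.imshow(ZZ, cmap='turbo')
#   plt.colorbar()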
################################################################
################################################################
##
## plot graphs and confirm the correctness of the functions
if __name__ == '__main__':
import datetime as dt
import ryutils
rit = ryutils.intify_tuple
doAll = False
if doAll:
p = Plotter(1,2,3,figsize=(12,12))
theta = np.linspace(-10,10,20) + np.random.random(20) # in degrees
radius = np.linspace(.5, 1., 20) + np.random.random(20) /50.
thetax2 = np.hstack((theta.reshape(-1,1), -4 + theta.reshape(-1,1)))
radiusx2 = np.hstack((radius.reshape(-1,1), 0.1+radius.reshape(-1,1)))
# plot one data set
p.pie(1,theta,radius,ptitle='test 1',radLabel='Distance m',angLabel='OTA deg',thetaAxis=[-20,20], radiusAxis=[0.5,1],
numAngGrid=3, numRadGrid=5,linewidths=[5],linestyle=[''],markers=['x'],label=['dada'],legendAlpha=0.7,
labelfsize=14,titlefsize=18)
p.pie(2,theta,radius,ptitle='test 2',radLabel='Distance m',angLabel='OTA deg',thetaAxis=[-20,20], radiusAxis=[0.,1],
numAngGrid=3, numRadGrid=5,linestyle=['--'])
# plot two datasets in one np.array
p.pie(3,thetax2,radiusx2,ptitle='test 3',radLabel='Distance m',angLabel='OTA deg',thetaAxis=[-20,20], radiusAxis=[0.5,1],
numAngGrid=3, numRadGrid=5,linewidths=[2,1],linestyle=['-',':'],markers=['v','o'],drawGrid=False,
label=['dada','dodo'],clip_on=False)
p.pie(4,theta+180.,radius,ptitle='',radLabel='Distance m',angLabel='OTA deg',thetaAxis=[90,270], radiusAxis=[0.,1],
numAngGrid=10, numRadGrid=5,linestyle=['--'],degreeformatter="%d")
p.pie(5,theta+180.,radius,ptitle='',radLabel='Distance m',angLabel='OTA deg',thetaAxis=[91,270], radiusAxis=[0.,1],
numAngGrid=10, numRadGrid=5,linestyle=['--'],degreeformatter="%d")
# use the same subplot more than once
p.pie(6,theta+180,radius,ptitle='test 6',radLabel='Distance m',angLabel='OTA deg',
thetaAxis=[135,270], radiusAxis=[0,1],xytickfsize=8,numAngGrid=3, numRadGrid=5,
linewidths=[5],linestyle=[''],markers=['x'],label=['dada'],radangfsize=8)
p.pie(6,theta+185,radius,ptitle='test 6',radLabel='Distance m',angLabel='OTA deg',
thetaAxis=[135,271], radiusAxis=[0,1],xytickfsize=8,numAngGrid=3, numRadGrid=5,
linewidths=[2],linestyle=['-'],markers=['o'],label=['dodo'],markevery=4,radangfsize=8)
p.saveFig('piepol.png')
if doAll: # stacked plot
np.random.seed(1)
fnx = lambda : np.random.randint(5, 50, 10)
y = np.row_stack((fnx(), fnx(), fnx()))
x = np.arange(10)
# Make new array consisting of fractions of column-totals,
# using .astype(float) to avoid integer division
percent = y / y.sum(axis=0).astype(float) * 100
#data must vary along rows for single column (row-major)
percent = percent.T
# print(rit(percent.shape))
sp = Plotter(1,1,1,figsize=(16,8))
sp.stackplot(1,x,percent,'Stack plot','X-axis label','Y-axis label',
plotCol=['crimson','teal','#553300'], label=['aaa','bbb','cccc'],legendAlpha=0.5)
sp.saveFig('stackplot.png')
    if doAll: # next line includes both 0 and 360 degrees, i.e., overlap on edge
angled = np.linspace(0.,360.,25)
angler = np.pi * angled / 180.
grange = np.linspace(500.,4000.,8)
#create a 2-D meshgrid.
grangeg, anglerg= np.meshgrid(grange,angler + np.pi * 7.5 / 180)
height = 2000.
launch = (1 + np.cos(anglerg) ) ** .1 * (1 - np.exp(-( 500 + grangeg) / 2000.) )
launch *= np.exp(-( 500 + grangeg) / (6000. - height))
launch = np.where(launch<0.2, 0.2, launch)
#normalise
launch -= np.min(launch)
launch /= np.max(launch)
pm = Plotter(1,1,2,figsize=(16,8))
pm.polarMesh(1,angler+np.pi, grange, launch.T,
ptitle='Probability of launch for height {:.0f} [m]'.format(height),
radscale=[0, 4000], cbarshow=True,
cbarorientation='vertical', cbarcustomticks=[], cbarfontsize=12,
rgrid=[500], thetagrid=[45], drawGrid=True,
direction='clockwise', zerooffset=np.pi/2, )
pm.polar3d(2, angler, grange, launch, zlabel='zlabel',
linewidth=1, zscale=[0, 1], azim=135, elev=60, alpha=0.5,edgeCol=['k'])
pm.saveFig('3Dlaunch.png')
if doAll:
############################################################################
#create the wireframe for the sphere
u = np.linspace(0, np.pi, 100)
v = np.linspace(0, 2 * np.pi, 100)
x = np.outer(np.sin(u), np.sin(v))
y = np.outer(np.sin(u), np.cos(v))
z = np.outer(np.cos(u), np.ones_like(v))
#create the random point samples on the sphere
samples = 500
np.random.seed(1)
np.random.RandomState(200)
theta = 2 * np.pi * np.random.uniform(0, 1, size=samples)
#biased sampling with higher density towards the poles
phib = np.pi * (2 * np.random.uniform(0, 1, size=samples) -1 ) / 2
#uniform sampling corrected for polar bias
phiu = np.arccos(2 * np.random.uniform(0, 1, size=samples) -1 ) - np.pi/2
#create normal vectors using the pairs of random angles in a transformation
xsb = np.cos(phib) * np.cos(theta)
ysb = np.cos(phib) * np.sin(theta)
zsb = np.sin(phib)
xsu = np.cos(phiu) * np.cos(theta)
ysu = np.cos(phiu) * np.sin(theta)
zsu = np.sin(phiu)
azim = 45 # view angle
elev = 45 # view angle
sph = Plotter(1,1,2, figsize=(20,10))
sph.mesh3D(1,x,y,z,'','x','y','z',alpha=0.1, wireframe=False, surface=True,linewidth=0, drawGrid=False)
sph.mesh3D(1,x,y,z,'','x','y','z', alphawire=0.4, wireframe=True, surface=False,
edgeCol=['b'],plotCol=['b'],linewidth=0.4,rstride=2,cstride=2, drawGrid=False)
sph.plot3d(1, xsb, ysb, zsb, ptitle='', scatter=True,markers=['o' for i in range(len(xsb))],
azim=azim, elev=elev)
sph.mesh3D(2,x,y,z,'','x','y','z',alpha=0.1, wireframe=False, surface=True,linewidth=0, drawGrid=False)
sph.mesh3D(2,x,y,z,'','x','y','z', alphawire=0.4, wireframe=True, surface=False,
edgeCol=['b'],plotCol=['b'],linewidth=0.4,rstride=2,cstride=2, drawGrid=False)
sph.plot3d(2, xsu, ysu, zsu, ptitle='', scatter=True,markers=['o' for i in range(len(xsu))],
azim=azim, elev=elev)
sph.saveFig('3dsphere.png')
############################################################################
#demonstrate the use of a polar 3d plot
#create the radial and angular vectors
r = np.linspace(0,1.25,25)
p = np.linspace(0,2*np.pi,50)
#the r and p vectors may have non-constant grid-intervals
# r = np.logspace(np.log10(0.001),np.log10(1.25),50)
# p = np.logspace(np.log10(0.001),np.log10(2*np.pi),100)
#build a meshgrid (2-D array of values)
R,P = np.meshgrid(r,p)
#calculate the z values on the cartesian grid
# value = (np.tan(P**3)*np.cos(P**2)*(R**2 - 1)**2)
value = ((R**2 - 1)**2)
p3D = Plotter(1, 1, 1,'Polar plot in 3-D',figsize=(12,8))
p3D.polar3d(1, p, r, value, ptitle='3-D Polar Plot',
xlabel='xlabel', ylabel='ylabel', zlabel='zlabel')#,zscale=[-2,1])
p3D.saveFig('p3D.png')
#p3D.saveFig('p3D.eps')
with open('./data/Intensity-max.dat', 'rt') as fin:
aArray = np.loadtxt( fin, skiprows=1 , dtype=float )
azim = aArray[1:,0] + np.pi # to positive angles
elev = aArray[0,1:] + np.pi/2 # get out of negative data on polar
intensity = aArray[1:,1:]
p3D = Plotter(1, 2, 2,'Polar plot in 3-D',figsize=(12,8))
elev1 = elev
p3D.polar3d(1, azim, elev1, intensity, zlabel='zlabel',zscale=[0, 600], azim=45, elev=30)
p3D.polar3d(2, azim, elev, intensity, zlabel='zlabel',zscale=[0, 2000], azim=-45, elev=30)
elev3 = elev + np.pi/2 # get hole in centre
p3D.polar3d(3, azim, elev3, intensity, zlabel='zlabel',zscale=[0, 2000], azim=60, elev=60)
p3D.polar3d(4, azim, elev, intensity, zlabel='zlabel',zscale=[0, 1000], azim=110, elev=-30)
p3D.saveFig('p3D2.png')
#p3D.saveFig('p3D2.eps')
############################################################################
xv,yv = np.mgrid[-2:2:21j, -2:2:21j]
z = np.exp(np.exp(-(xv**2 + yv**2)))
I = Plotter(4, 1, 2,'High dynamic range image', figsize=(8, 4))
I.showImage(1, z, ptitle='xv**2 + yv**2', titlefsize=10, cbarshow=True, cbarorientation = 'vertical', cbarfontsize = 7)
ip = ProcessImage()
zz, customticksz = ip.compressEqualizeImage(z, 2, 10)
I.showImage(2, zz, ptitle='Equalized xv**2 + yv**2', titlefsize=10, cbarshow=True, cbarorientation = 'vertical', cbarcustomticks=customticksz, cbarfontsize = 7)
I.saveFig('HistoEq.png')
# I.saveFig('HistoEq.eps')
############################################################################
# demonstrate dates on the x-axis
dates = ['01/02/1991','01/03/1991','01/04/1991']
x = np.asarray([dt.datetime.strptime(d,'%m/%d/%Y').date() for d in dates])
y = np.asarray(list(range(len(x))))
pd = Plotter(1)
pd.plot(1,x,y,xIsDate=True,pltaxis=[x[0],x[-1],-1,4],xtickRotation=30)
        pd.saveFig('plotdateX.png')
#%% [markdown]
# # Titanic (Classical Problem) for Machine Learning Engineers
from sklearn.metrics import make_scorer, accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn import preprocessing
import matplotlib.pylab as pylab
import matplotlib.pyplot as plt
from pandas import get_dummies
import matplotlib as mpl
import xgboost as xgb
import seaborn as sns
import pandas as pd
import numpy as np
import matplotlib
import warnings
import sklearn
import scipy
import numpy
import json
import sys
import csv
import os
sns.set(style='white', context='notebook', palette='deep')
pylab.rcParams['figure.figsize'] = 12,8
warnings.filterwarnings('ignore')
mpl.style.use('ggplot')
sns.set_style('white')
df_train = pd.read_csv('/home/rahul/Desktop/Link to rahul_environment/Kernels/Titanic_Disaster/train.csv')
import glob
import random
from pathlib import Path
import numpy as np
import pandas as pd
import pickle5 as pickle
from astropy.table import Table
from scipy import integrate, stats
from scipy.stats import maxwell
from scipy.interpolate import griddata
from shapely.geometry import Point
from shapely.geometry.polygon import Polygon
def select_within_boundary(x, y, boundary):
polygon = Polygon(boundary)
p = Point(x, y)
is_in = polygon.contains(p)
return is_in
def maxwell_d(x, scale_s, A_s, scale_f, loc_f):
return A_s * maxwell.pdf(x, scale=scale_s) + (1 - A_s) * maxwell.pdf(
x, scale=scale_f, loc=loc_f
)
def gauss(x, *p):
A, mu, sigma = p
return A * np.exp(-((x - mu) ** 2) / (2 * sigma ** 2))
def dgauss(x, *p):
return gauss(x, p[0], p[1], p[2]) + gauss(x, p[3], p[4], p[5])
def sersic(x, a, b, c, d, m):
return -a * np.exp(-((abs(x - d) / b) ** c)) + m
def pdf(kernel, x, min_x, max_x, int_k):
"""possibility distribution function
Returns:
p (array-like): probability
"""
if x < min_x or x > max_x:
return 0
else:
return kernel(x) / int_k
def log_err_func(x, a, b, c):
return a / (x - b) + c
def gen_kernel_sample(kernel, num, min_x, max_x):
"""Generate mass following Kroupa mass function
Args:
num (int): number of points
min_x (float): minimum boundary
max_x (float): maximum boundary
Returns:
result (array): mass
"""
int_k = integrate.quad(lambda x: kernel(x), min_x, max_x)[0]
sample = []
x = np.linspace(min_x, max_x, 100)
c = pdf(kernel, x[kernel(x).argmax()], min_x, max_x, int_k)
for i in range(num):
flag = 0
while flag == 0:
x = random.uniform(min_x, max_x)
y = random.uniform(0, 1)
if y < pdf(kernel, x, min_x, max_x, int_k) / c:
sample.append(x)
flag = 1
return sample
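

# Usage sketch (illustrative addition, not part of the original analysis): draw samples
# from a unit Gaussian truncated to [-3, 3] with the rejection sampler above;
# `stats` is already imported from scipy at the top of this module.
def _demo_gen_kernel_sample(n=1000):
    kernel = stats.norm(loc=0.0, scale=1.0).pdf
    return gen_kernel_sample(kernel, n, -3.0, 3.0)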
def z2mh(Z):
Y = 0.2485 + 1.78 * Z
X = 1 - Y - Z
Z_sun = 0.0152
Y_sun = 0.2485 + 1.78 * Z_sun
X_sun = 1 - Y_sun - Z_sun
mh = np.log10(Z / X) - np.log10(Z_sun / X_sun)
return mh
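# Consistency check implied by the definition above: z2mh(0.0152) returns 0.0,
# i.e. solar metallicity (Z_sun = 0.0152) corresponds to [M/H] = 0.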
def get_z_tri(y, dist_dir):
Z = list()
dist_list = list()
for mass in y:
info = pickle.load(
open("./pickle/{0}/m{1:.2f}.pkl".format(dist_dir, mass), "rb")
)
vsini_dist = info["vsini_dist"]
vsini_dist_sel = vsini_dist[(vsini_dist.x >= 10) & (vsini_dist.x < 400)]
vsini_dist_sel = vsini_dist_sel.assign(mass=info["mass_mean"])
dist_list.append(vsini_dist_sel)
dist = pd.concat(dist_list)
X = np.linspace(min(dist.x), max(dist.x), 100)
Y = np.linspace(min(dist.mass), max(dist.mass), 50)
Z = griddata((dist.x, dist.mass), dist.g, (X[None, :], Y[:, None]), method="linear")
Z[Z < 0] = 0
return X, Y, Z
def get_z_tri_dist(y, dist_dir):
dist_list = list()
for mass in y:
info = pickle.load(
open("./pickle/{0}/m{1:.2f}.pkl".format(dist_dir, mass), "rb")
)
vsini_dist = info["vsini_dist"]
vsini_dist_sel = vsini_dist[(vsini_dist.x >= 20) & (vsini_dist.x < 400)]
vsini_dist_sel = vsini_dist_sel.assign(mass=info["mass_mean"])
dist_list.append(vsini_dist_sel)
dist = pd.concat(dist_list)
return dist
def get_z_omega_dist(y, dist_dir, ratio):
df = Table.read("./material/Netopil2017.fit").to_pandas()
df = df[df.v_vcrit > 0]
dist_list = list()
for mass in y:
info = pickle.load(
open("./pickle/{0}/m{1:.2f}.pkl".format(dist_dir, mass), "rb")
)
omega_dist = info["omega_dist"]
omega_dist.g[omega_dist.x > 1.03] = 0
g_new = clean_omega_cp_dist(omega_dist, df.v_vcrit, ratio)
omega_dist = omega_dist.assign(mass=info["mass_mean"])
omega_dist.g = g_new
dist_list.append(omega_dist)
    dist = pd.concat(dist_list)
# python 2/3 compatibility
from __future__ import division, print_function
import sys
import os.path
import numpy
import pandas
import copy
import difflib
import scipy
import collections
import json
# package imports
import rba
from .rba import RbaModel, ConstraintMatrix, Solver
from .rba_SimulationData import RBA_SimulationData
from .rba_SimulationParameters import RBA_SimulationParameters
from .rba_ModelStructure import RBA_ModelStructure
from .rba_Problem import RBA_Problem
from .rba_Matrix import RBA_Matrix
from .rba_LP import RBA_LP
from .rba_FBA import RBA_FBA
from .rba_LogBook import RBA_LogBook
class RBA_Session(object):
"""
Top level of the RBA API.
Attributes
----------
    xml_dir : str
        Path to the directory holding the rba-model (xml) files.
    model : rba.RbaModel
        RBA model, as loaded from the xml files.
    matrices : rba.ConstraintMatrix
        Computational (matrix) representation of the model.
    solver : rba.Solver
        Solver operating on the constraint matrices.
    Problem : rbatools.RBA_Problem
        Linear-programming problem derived from the model.
    Medium : dict
        Medium composition (metabolite ID -> concentration).
    ModelStructure : rbatools.RBA_ModelStructure
        Static information on the model's components.
    Results : dict
        Recorded simulation results (pandas.DataFrames), keyed by result type.
    Parameters : dict
        Recorded simulation parameters (pandas.DataFrames), keyed by parameter type.
    SimulationData : rbatools.RBA_SimulationData
        Simulation-data object generated by writeResults().
    SimulationParameters : rbatools.RBA_SimulationParameters
        Simulation-parameters object generated by writeResults().
    Mu : float
        Current growth rate as numeric value.
Methods
----------
__init__(xml_dir)
Creates RBA_Session object from files
Parameters
----------
xml_dir : str
Path to the directory where rba-model files are located.
rebuild_from_model()
Rebuilds computational model-representation (matrix) from own attribute "model" (rba.RbaModel-object).
reloadModel()
Reloads model from xml-files and then rebuild computational model-representation (matrix).
recordResults(runName)
        Records simulation output for further use
        and stores it in the 'Results'-attribute as pandas.DataFrames, in a dictionary with the respective run-name being a column in all DataFrames.
Parameters
----------
runName : str
Name of observation/condition.
Serves as ID for all Data, originating from these.
recordParameters(runName)
        Records simulation parameters (LP-coefficients etc.) for further use
        and stores them in the 'Parameters'-attribute as pandas.DataFrames, in a dictionary with the respective run-name being a column in all DataFrames.
Parameters
----------
runName : str
Name of observation/condition.
Serves as ID for all Data, originating from these.
clearResults()
        Removes all previously recorded results and deletes the 'Results'-attribute.
clearParameters()
        Removes all previously recorded parameters and deletes the 'Parameters'-attribute.
    writeResults(session_name='', digits=5)
        Creates SimulationData and SimulationParameters objects from recordings ('Results', 'Parameters').
Stores them as rbatools.RBA_SimulationData
and rbatools.RBA_SimulationParameters objects as attributes.
Access via attributes .SimulationData and SimulationParameters respectively.
Parameters
----------
digits : int
Number of decimal places in the numeric results
            Default: 5
session_name : str
Name of Simulation session.
Default: ''
returnExchangeFluxes()
        Returns a dictionary with the exchange-rates of boundary-metabolites.
Returns
-------
        Dictionary with exchange-keys and respective -rates.
ConstraintSaturation(constraints=None)
Determines the saturation of model constraints at current solution.
Parameters
----------
constraints : str or list of str
Specifies constraints(s) for which the saturation is to be determined.
Default-value = None:
All model-constraints are taken
Returns
-------
Pandas DataFrame with constraint-names as indices and the columns 'LHS', 'RHS', and 'Saturation'.
        'LHS': The sum over the respective constraint-row multiplied elementwise with the solution vector.
        'RHS': The value of the problem's righthand side, corresponding to the respective constraint.
'Saturation': The saturation of the respective constraint ('LHS'/'RHS').
(Equality constraints are always saturated)
setMedium(changes)
Sets the concentration of specified growth-substrate(s) in medium.
Parameters
----------
changes : dict
Keys : ID of metabolite(s) in medium.
            Values : New concentration(s)
setMu(Mu)
Sets growth-rate to specified value.
Parameters
----------
Mu : float
Growth rate
doSolve(runName='DontSave')
Solves problem to find solution.
Does the same as rbatools.RBA_Problem.solveLP().
Just has some automatic option for results-recording.
Parameters
----------
runName : str
Name of observation.
Serves as ID for all data, originating from this run.
Special values :
'DontSave' : Results are not recorded
'Auto' : Results are automatically recorded
and appended to existing ones.
Named with number.
Any other string: Results are recorded under this name.
Default: 'DontSave'
findMaxGrowthRate(precision=0.0005, max=4, start_value=None, recording=False)
Applies dichotomy-search to find the maximal feasible growth-rate.
Parameters
----------
precision : float
            Numeric precision with which the maximum is approximated.
            Default : 0.0005
max : float
Defines the highest growth rate to be screened for.
Default=4
start_value : float
Defines a starting-value of the search for the maximum growth-rate.
A close starting-value reduces the required number of iterations, for the algorithm to converge.
If not provided search starts at growth-rate 0.
Default = None
recording : bool
Records intermediate feasible solutions
while approaching the maximum growth-rate.
Default : False
Returns
-------
maximum feasible growth rate as float.
knockOut(gene)
Simulates a gene knock out.
Constrains all variables in the LP-problem (enzymes, other machineries), which require this gene(s), to zero.
Parameters
----------
gene : str or list of strings
ID(s) of model-proteins to be knocked out.
Can either be gene-identifier, represented as ID or ProtoID of proteins in rbatools.protein_bloc.ProteinBlock.Elements class (depends on whether protein-isoforms are considered).
FeasibleRange(variables=None)
Determines the feasible range of model variables.
Parameters
----------
variables : str or list of str
Specifies variable(s) for which the feasible range is to be determined.
Default-value = None:
All model-variables are taken
Returns
-------
Dictionary with variable-names as keys and other dictionaries as values.
The 'inner' dictionaries hold keys 'Min' and 'Max'
with values representing lower and upper bound of feasible range respectively.
E.g. : {'variableA':{'Min':42 , 'Max':9000},
'variableB':{'Min':-9000 , 'Max':-42}}
ParetoFront(variable_X, variable_Y, N=10, sign_VY='max')
Determine Pareto front of two model variables.
Parameters
----------
variable_X : str
ID of variable, representing the X-coordinate of the Pareto-front
variable_Y : str
ID of variable, representing the Y-coordinate of the Pareto-front
N : int
Number of intervals within the feasible range of variable_X.
Default-value=10.
sign_VY : str
'max': variable_Y is maximised
'min': variable_Y is minimised
Returns
-------
Pandas DataFrame with columns named after the two input variables
and 'N' rows. Each row represents an interval on the Pareto front.
Entries on each row are the X and Y coordinate on the Pareto front,
representing the values of the two variables.
"""
def __init__(self, xml_dir):
"""
Creates RBA_Session object from files
Parameters
----------
xml_dir : str
Path to the directory where rba-model files are located.
"""
self.xml_dir = xml_dir
self.LogBook = RBA_LogBook('Controler')
if not hasattr(self, 'ModelStructure'):
if os.path.isfile(str(self.xml_dir+'/ModelStructure.json')):
self.ModelStructure = RBA_ModelStructure()
with open(str(self.xml_dir+'/ModelStructure.json'), 'r') as myfile:
data = myfile.read()
self.ModelStructure.fromJSON(inputString=data)
else:
self.build_ModelStructure()
self.model = RbaModel.from_xml(input_dir=xml_dir)
self.matrices = ConstraintMatrix(model=self.model)
self.solver = Solver(matrix=self.matrices)
self.LogBook.addEntry('Model loaded from {}.'.format(self.xml_dir))
self.Problem = RBA_Problem(solver=self.solver)
medium = pandas.read_csv(xml_dir+'/medium.tsv', sep='\t')
self.Medium = dict(zip(list(medium.iloc[:, 0]), [float(i)
for i in list(medium.iloc[:, 1])]))
self.Mu = self.Problem.Mu
self.ExchangeMap = buildExchangeMap(self)
def build_ModelStructure(self):
self.ModelStructure = RBA_ModelStructure()
self.ModelStructure.fromFiles(xml_dir=self.xml_dir)
self.ModelStructure.exportJSON(path=self.xml_dir)
def addExchangeReactions(self):
"""
Adds explicit exchange-reactions of boundary-metabolites to RBA-problem, named R_EX_ followed by metabolite name (without M_ prefix).
"""
Mets_external = [m.id for m in self.model.metabolism.species if m.boundary_condition]
Mets_internal = [m.id for m in self.model.metabolism.species if not m.boundary_condition]
Reactions = [r.id for r in self.model.metabolism.reactions]
full_S = rba.core.metabolism.build_S(
Mets_external+Mets_internal, self.model.metabolism.reactions)
S_M_ext = full_S[:len(Mets_external), ].toarray()
col_indices_toremove = []
for i in range(S_M_ext.shape[1]):
s_col_uniques = list(set(list(S_M_ext[:, i])))
if len(s_col_uniques) == 1:
if s_col_uniques[0] == 0:
col_indices_toremove.append(i)
RemainingReactions = [i for i in Reactions if Reactions.index(
i) not in col_indices_toremove]
S_ext = numpy.delete(S_M_ext, col_indices_toremove, axis=1)
A = numpy.concatenate((S_ext, numpy.eye(len(Mets_external))), axis=1, out=None)
ColNames = RemainingReactions+[str('R_EX_'+i.split('M_')[-1]) for i in Mets_external]
# print(str('R_EX_'+i.split('M_')[-1]))
LBs = list([self.Problem.LP.LB[self.Problem.LP.col_names.index(i)]
for i in RemainingReactions]+[-10000]*len(Mets_external))
UBs = list([self.Problem.LP.UB[self.Problem.LP.col_names.index(i)]
for i in RemainingReactions]+[10000]*len(Mets_external))
b = [0]*len(Mets_external)
f = list([self.Problem.LP.f[self.Problem.LP.col_names.index(i)]
for i in RemainingReactions]+[0]*len(Mets_external))
ExchangeMatrix = RBA_Matrix()
ExchangeMatrix.A = scipy.sparse.coo_matrix(A)
ExchangeMatrix.b = numpy.array([0]*len(Mets_external))
ExchangeMatrix.f = numpy.array(f)
ExchangeMatrix.LB = numpy.array(LBs)
ExchangeMatrix.UB = numpy.array(UBs)
ExchangeMatrix.row_signs = ['E']*len(Mets_external)
ExchangeMatrix.row_names = Mets_external
ExchangeMatrix.col_names = ColNames
ExchangeMatrix.mapIndices()
self.Problem.LP.addMatrix(matrix=ExchangeMatrix)
self.ExchangeReactionMap = dict(
zip(Mets_external, [str('R_EX_'+i.split('M_')[-1]) for i in Mets_external]))
def rebuild_from_model(self):
"""
Rebuilds computational model-representation (matrix) from own attribute "model" (rba.RbaModel-object).
"""
self.LogBook.addEntry('Model rebuilt.')
self.matrices = ConstraintMatrix(model=self.model)
self.solver = Solver(matrix=self.matrices)
self.Problem = RBA_Problem(solver=self.solver)
self.setMedium(changes=self.Medium)
def reloadModel(self):
"""
Reloads model from xml-files and then rebuild computational model-representation (matrix).
"""
self.LogBook.addEntry('Model reloaded from {}.'.format(self.xml_dir))
self.model = RbaModel.from_xml(input_dir=self.xml_dir)
self.rebuild_from_model()
def recordResults(self, runName):
"""
        Records simulation output for further use
        and stores it in the 'Results'-attribute as pandas.DataFrames, in a dictionary with the respective run-name being a column in all DataFrames.
Parameters
----------
runName : str
Name of observation/condition.
Serves as ID for all Data, originating from these.
"""
self.LogBook.addEntry('Solution recorded under {}.'.format(runName))
if not hasattr(self, 'Results'):
self.Results = {'Reactions': pandas.DataFrame(index=list(self.ModelStructure.ReactionInfo.Elements.keys())),
'Enzymes': pandas.DataFrame(index=list(self.ModelStructure.EnzymeInfo.Elements.keys())),
'Processes': pandas.DataFrame(index=[self.ModelStructure.ProcessInfo.Elements[i]['ID']+'_machinery' for i in self.ModelStructure.ProcessInfo.Elements.keys()]),
'Proteins': pandas.DataFrame(index=list(self.ModelStructure.ProteinMatrix['Proteins'])),
'ProtoProteins': pandas.DataFrame(index=list(self.ModelStructure.ProteinGeneMatrix['ProtoProteins'])),
'Constraints': pandas.DataFrame(index=self.Problem.LP.row_names),
'SolutionType': pandas.DataFrame(index=['SolutionType']),
'Mu': pandas.DataFrame(index=['Mu']),
'ObjectiveFunction': pandas.DataFrame(index=self.Problem.LP.col_names),
'ObjectiveValue': pandas.DataFrame(index=['ObjectiveValue']),
'ExchangeFluxes': pandas.DataFrame(index=list(self.ExchangeMap.keys()))}
Exchanges = self.returnExchangeFluxes()
for i in Exchanges.keys():
self.Results['ExchangeFluxes'].loc[i, runName] = Exchanges[i]
self.Results['Reactions'][runName] = self.Results['Reactions'].index.map(
{i: self.Problem.SolutionValues[i] for i in list(self.Results['Reactions'].index)})
self.Results['Enzymes'][runName] = self.Results['Enzymes'].index.map(
{i: self.Problem.SolutionValues[i] for i in list(self.Results['Enzymes'].index)})
self.Results['Processes'][runName] = self.Results['Processes'].index.map(
{i: self.Problem.SolutionValues[i] for i in list(self.Results['Processes'].index)})
self.Results['Constraints'][runName] = self.Results['Constraints'].index.map(
{i: self.Problem.DualValues[i] for i in self.Problem.LP.row_names})
self.Results['Proteins'][runName] = self.Results['Proteins'].index.map(
ProteomeRecording(self, runName))
self.Results['ProtoProteins'][runName] = self.Results['ProtoProteins'].index.map(
ProtoProteomeRecording(self, runName, self.Results['Proteins']))
self.Results['SolutionType'][runName] = self.Problem.SolutionType
self.Results['Mu'][runName] = self.Problem.Mu
self.Results['ObjectiveFunction'][runName] = list(self.Problem.getObjective().values())
self.Results['ObjectiveValue'][runName] = self.Problem.ObjectiveValue
def recordParameters(self, runName):
"""
        Records simulation parameters (LP-coefficients etc.) for further use
        and stores them in the 'Parameters'-attribute as pandas.DataFrames, in a dictionary with the respective run-name being a column in all DataFrames.
Parameters
----------
runName : str
Name of observation/condition.
Serves as ID for all Data, originating from these.
"""
self.LogBook.addEntry('Coefficients recorded under {}.'.format(runName))
EnzymeCapacities = self.get_parameter_values(
parameter_type='enzyme_efficiencies', species=None, output_format='dict')
ProcessCapacities = self.get_parameter_values(
parameter_type='machine_efficiencies', species=None, output_format='dict')
CompartmentCapacities = self.get_parameter_values(
parameter_type='maximal_densities', species=None, output_format='dict')
TargetValues = self.get_parameter_values(
parameter_type='target_values', species=None, output_format='dict')
if not hasattr(self, 'Parameters'):
self.Parameters = {'EnzymeEfficiencies_FW': pandas.DataFrame(index=list(EnzymeCapacities.keys())),
'EnzymeEfficiencies_BW': pandas.DataFrame(index=list(EnzymeCapacities.keys())),
'ProcessEfficiencies': pandas.DataFrame(index=list(ProcessCapacities.keys())),
'CompartmentCapacities': pandas.DataFrame(index=list(CompartmentCapacities.keys())),
'Medium': pandas.DataFrame(index=self.Medium.keys()),
'TargetValues': pandas.DataFrame(index=[TargetValues[i]['Target_id'] for i in list(TargetValues.keys())])}
self.Parameters['EnzymeEfficiencies_FW'][runName] = self.Parameters['EnzymeEfficiencies_FW'].index.map({i: list(
EnzymeCapacities[i]['Forward'].values())[0] for i in list(EnzymeCapacities.keys()) if len(list(EnzymeCapacities[i]['Forward'].values())) > 0})
self.Parameters['EnzymeEfficiencies_BW'][runName] = self.Parameters['EnzymeEfficiencies_BW'].index.map({i: list(
EnzymeCapacities[i]['Backward'].values())[0] for i in list(EnzymeCapacities.keys()) if len(list(EnzymeCapacities[i]['Forward'].values())) > 0})
self.Parameters['ProcessEfficiencies'][runName] = self.Parameters['ProcessEfficiencies'].index.map(
{i: list(ProcessCapacities[i].values())[0] for i in list(ProcessCapacities.keys()) if len(list(ProcessCapacities[i].values())) > 0})
self.Parameters['CompartmentCapacities'][runName] = self.Parameters['CompartmentCapacities'].index.map(
{i: list(CompartmentCapacities[i].values())[0] for i in list(CompartmentCapacities.keys()) if len(list(CompartmentCapacities[i].values())) > 0})
self.Parameters['Medium'][runName] = self.Parameters['Medium'].index.map(self.Medium)
self.Parameters['TargetValues'][runName] = self.Parameters['TargetValues'].index.map(
{TargetValues[i]['Target_id']: list(TargetValues[i]['Target_value'].values())[0] for i in list(TargetValues.keys()) if len(list(TargetValues[i]['Target_value'].values())) > 0})
def clearResults(self):
"""
        Removes all previously recorded results and deletes the 'Results'-attribute.
"""
self.LogBook.addEntry('Results cleared.')
delattr(self, 'Results')
def clearParameters(self):
"""
        Removes all previously recorded parameters and deletes the 'Parameters'-attribute.
"""
self.LogBook.addEntry('Parameters cleared.')
delattr(self, 'Parameters')
def writeResults(self, session_name='', digits=5, loggingIntermediateSteps=False):
"""
        Creates SimulationData and SimulationParameters objects from recordings ('Results', 'Parameters').
Stores them as rbatools.RBA_SimulationData
and rbatools.RBA_SimulationParameters objects as attributes.
Access via attributes .SimulationData and SimulationParameters respectively.
Parameters
----------
digits : int
Number of decimal places in the numeric results
            Default: 5
session_name : str
Name of Simulation session.
Default: ''
"""
self.LogBook.addEntry('Data written under {}.'.format(session_name))
if hasattr(self, 'Results'):
self.Results['uniqueReactions'] = mapIsoReactions(Controller=self)
self.Results['SolutionType'] = self.Results['SolutionType']
self.Results['Mu'] = self.Results['Mu'].round(digits)
self.Results['ObjectiveFunction'] = self.Results['ObjectiveFunction'].loc[(
self.Results['ObjectiveFunction'] != 0).any(axis=1)].round(digits)
self.Results['ObjectiveValue'] = self.Results['ObjectiveValue'].round(digits)
self.Results['Proteins'] = self.Results['Proteins'].round(digits)
self.Results['uniqueReactions'] = self.Results['uniqueReactions'].round(digits)
self.Results['Reactions'] = self.Results['Reactions'].round(digits)
self.Results['Enzymes'] = self.Results['Enzymes'].round(digits)
self.Results['Processes'] = self.Results['Processes'].round(digits)
self.Results['Constraints'] = self.Results['Constraints'].round(digits)
self.Results['ExchangeFluxes'] = self.Results['ExchangeFluxes'].round(digits)
self.SimulationData = RBA_SimulationData(StaticData=self.ModelStructure)
self.SimulationData.fromSimulationResults(Controller=self, session_name=session_name)
if hasattr(self, 'Parameters'):
self.Parameters['EnzymeEfficiencies_FW'] = self.Parameters['EnzymeEfficiencies_FW'].round(
digits)
self.Parameters['EnzymeEfficiencies_BW'] = self.Parameters['EnzymeEfficiencies_BW'].round(
digits)
self.Parameters['ProcessEfficiencies'] = self.Parameters['ProcessEfficiencies'].round(
digits)
self.Parameters['CompartmentCapacities'] = self.Parameters['CompartmentCapacities'].round(
digits)
self.Parameters['TargetValues'] = self.Parameters['TargetValues'].round(digits)
self.Parameters['Medium'] = self.Parameters['Medium'].loc[(
self.Parameters['Medium'] != 0).any(axis=1)].round(digits)
self.SimulationParameters = RBA_SimulationParameters(StaticData=self.ModelStructure)
self.SimulationParameters.fromSimulationResults(Controller=self)
def returnExchangeFluxes(self):
"""
        Returns a dictionary with the exchange-rates of boundary-metabolites.
Returns
-------
        Dictionary with exchange-keys and respective -rates.
"""
out = {}
for j in self.ExchangeMap.keys():
netflux = 0
for k in self.ExchangeMap[j].keys():
netflux += self.ExchangeMap[j][k]*self.Problem.SolutionValues[k]
if netflux != 0:
out[j] = netflux
return(out)
def ConstraintSaturation(self, constraints=None):
"""
Determines the saturation of model constraints at current solution.
Parameters
----------
constraints : str or list of str
Specifies constraints(s) for which the saturation is to be determined.
Default-value = None:
All model-constraints are taken
Returns
-------
Pandas DataFrame with constraint-names as indices and the columns 'LHS', 'RHS', and 'Saturation'.
        'LHS': The sum over the respective constraint-row multiplied elementwise with the solution vector.
        'RHS': The value of the problem's righthand side, corresponding to the respective constraint.
'Saturation': The saturation of the respective constraint ('LHS'/'RHS').
(Equality constraints are always saturated)
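
        Example (sketch, assuming 'sim' is an RBA_Session instance):
            saturation_all = sim.ConstraintSaturation()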
"""
if constraints is None:
ConstraintsInQuestion = self.Problem.LP.row_names
else:
if isinstance(constraints, list):
ConstraintsInQuestion = constraints
elif isinstance(constraints, str):
ConstraintsInQuestion = [constraints]
if len(list(constraints)) > 0:
if isinstance(constraints[0], list):
ConstraintsInQuestion = constraints[0]
if isinstance(constraints[0], str):
ConstraintsInQuestion = [constraints[0]]
if len(list(constraints)) == 0:
ConstraintsInQuestion = self.Problem.LP.row_names
rhs = self.Problem.getRighthandSideValue(ConstraintsInQuestion)
lhs = self.Problem.calculateLefthandSideValue(ConstraintsInQuestion)
RHS = list(rhs.values())
LHS = list(lhs.values())
Out = pandas.DataFrame(columns=['LHS', 'RHS', 'Saturation'], index=ConstraintsInQuestion)
for i in ConstraintsInQuestion:
lhval = LHS[self.Problem.LP.rowIndicesMap[i]]
rhval = RHS[self.Problem.LP.rowIndicesMap[i]]
sat = numpy.nan
if rhval != 0:
sat = lhval/rhval
Out.loc[i, 'LHS'] = lhval
Out.loc[i, 'RHS'] = rhval
Out.loc[i, 'Saturation'] = sat
self.LogBook.addEntry(
'Saturation of constraint {} determined to be {}.'.format(i, sat))
return(Out)
def setMedium(self, changes, loggingIntermediateSteps=False):
"""
Sets the concentration of specified growth-substrate(s) in medium.
Parameters
----------
changes : dict
Keys : ID of metabolite(s) in medium.
            Values : New concentration(s)
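
        Example (sketch; the metabolite ID is a placeholder and depends on the loaded medium):
            sim.setMedium(changes={'M_glc': 10.0})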
"""
for species in (changes.keys()):
self.Medium[species] = float(changes[species])
self.Problem.ClassicRBAmatrix.set_medium(self.Medium)
self.Problem.ClassicRBAmatrix.build_matrices(self.Mu)
inputMatrix = RBA_Matrix()
inputMatrix.loadMatrix(matrix=self.Problem.ClassicRBAmatrix)
self.Problem.LP.updateMatrix(matrix=inputMatrix, Ainds=MediumDependentCoefficients_A(
self), Binds=[], CTinds=[], LBinds=None, UBinds=None)
def setMu(self, Mu, loggingIntermediateSteps=False):
"""
Sets growth-rate to specified value.
Parameters
----------
Mu : float
Growth rate
"""
self.LogBook.addEntry('Growth-rate changed:{} --> {}'.format(self.Mu, float(Mu)))
self.Problem.setMu(Mu=float(Mu), ModelStructure=self.ModelStructure,
logging=loggingIntermediateSteps)
self.Mu = float(Mu)
def doSolve(self, runName='DontSave', loggingIntermediateSteps=False):
"""
Solves problem to find solution.
Does the same as rbatools.RBA_Problem.solveLP().
Just has some automatic option for results-recording.
Parameters
----------
runName : str
Name of observation.
Serves as ID for all data, originating from this run.
Special values :
'DontSave' : Results are not recorded
'Auto' : Results are automatically recorded
and appended to existing ones.
Named with number.
Any other string: Results are recorded under this name.
Default: 'DontSave'
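
        Example (sketch, assuming 'sim' is an RBA_Session instance):
            sim.setMu(0.25)
            sim.doSolve(runName='mu_0.25')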
"""
self.Problem.solveLP(logging=loggingIntermediateSteps)
if self.Problem.Solved:
            if runName != 'DontSave':
                if runName == 'Auto':
if hasattr(self, 'Results'):
name = str(self.Results['Reactions'].shape[1]+1)
if not hasattr(self, 'Results'):
name = '1'
                if runName != 'Auto':
name = runName
self.recordResults(runName=name)
def findMaxGrowthRate(self, precision=0.0005, max=4, start_value=None, recording=False, loggingIntermediateSteps=False):
"""
Applies dichotomy-search to find the maximal feasible growth-rate.
Parameters
----------
precision : float
            Numeric precision with which the maximum is approximated.
            Default : 0.0005
max : float
Defines the highest growth rate to be screened for.
Default=4
start_value : float
Defines a starting-value of the search for the maximum growth-rate.
A close starting-value reduces the required number of iterations, for the algorithm to converge.
If not provided search starts at growth-rate 0.
Default = None
recording : bool
Records intermediate feasible solutions
while approaching the maximum growth-rate.
Default : False
Returns
-------
maximum feasible growth rate as float.
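
        Example (sketch, assuming 'sim' is an RBA_Session instance):
            mu_max = sim.findMaxGrowthRate(precision=0.001, max=2)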
"""
minMu = 0
maxMu = max
if start_value is None:
testMu = minMu
else:
testMu = start_value
iteration = 0
while (maxMu - minMu) > precision:
self.setMu(Mu=testMu)
self.Problem.solveLP(logging=loggingIntermediateSteps)
if self.Problem.Solved:
iteration += 1
if recording:
self.recordResults('DichotomyMu_iteration_'+str(iteration))
minMu = testMu
else:
maxMu = testMu
testMu = numpy.mean([maxMu, minMu])
self.LogBook.addEntry('Maximal growth-rate found to be: {}.'.format(minMu))
if minMu == max:
print('Warning: Maximum growth rate might exceed specified range. Try rerunning this method with larger max-argument.')
self.setMu(Mu=minMu)
self.Problem.solveLP(logging=False)
self.Problem.SolutionType = 'GrowthRate_maximization'
return(minMu)
def knockOut(self, gene, loggingIntermediateSteps=False):
"""
Simulates a gene knock out.
Constrains all variables in the LP-problem (enzymes, other machineries), which require this gene(s), to zero.
Parameters
----------
gene : str or list of strings
ID(s) of model-proteins to be knocked out.
Can either be gene-identifier, represented as ID or ProtoID of proteins in rbatools.protein_bloc.ProteinBlock.Elements class (depends on whether protein-isoforms are considered).
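
        Example (sketch; 'geneA' is a placeholder for a gene/protein ID of the loaded model):
            sim.knockOut(gene='geneA')
            sim.doSolve()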
"""
if type(gene) is str:
genes = [gene]
if type(gene) is list:
genes = gene
isoform_genes = [g for g in genes if g in list(self.ModelStructure.ProteinInfo.Elements.keys(
))]+[i for g in genes for i in self.ModelStructure.ProteinInfo.Elements.keys() if self.ModelStructure.ProteinInfo.Elements[i]['ProtoID'] == g]
for g in isoform_genes:
self.LogBook.addEntry('Gene {} knocked out.'.format(g))
ConsumersEnzymes = self.ModelStructure.ProteinInfo.Elements[g]['associatedEnzymes']
for i in ConsumersEnzymes:
LikeliestVarName = difflib.get_close_matches(i, self.Problem.LP.col_names, 1)[0]
self.Problem.setLB(inputDict={LikeliestVarName: 0},
logging=loggingIntermediateSteps)
self.Problem.setUB(inputDict={LikeliestVarName: 0},
logging=loggingIntermediateSteps)
ConsumersProcess = self.ModelStructure.ProteinInfo.Elements[g]['SupportsProcess']
for i in ConsumersProcess:
LikeliestVarName = difflib.get_close_matches(
str(self.ModelStructure.ProcessInfo.Elements[i]['ID']+'_machinery'), self.Problem.LP.col_names, 1)[0]
self.Problem.setLB(inputDict={LikeliestVarName: 0},
logging=loggingIntermediateSteps)
self.Problem.setUB(inputDict={LikeliestVarName: 0},
logging=loggingIntermediateSteps)
def FeasibleRange(self, variables=None, loggingIntermediateSteps=False):
"""
Determines the feasible range of model variables.
Parameters
----------
variables : str or list of str
Specifies variable(s) for which the feasible range is to be determined.
Default-value = None:
All model-variables are taken
Returns
-------
Dictionary with variable-names as keys and other dictionaries as values.
The 'inner' dictionaries hold keys 'Min' and 'Max'
with values representing lower and upper bound of feasible range respectively.
E.g. : {'variableA':{'Min':42 , 'Max':9000},
'variableB':{'Min':-9000 , 'Max':-42}}
"""
if variables is None:
VariablesInQuestion = self.Problem.LP.col_names
else:
if isinstance(variables, list):
VariablesInQuestion = variables
elif isinstance(variables, str):
VariablesInQuestion = [variables]
out = {}
for i in VariablesInQuestion:
min = numpy.nan
max = numpy.nan
self.Problem.clearObjective(logging=loggingIntermediateSteps)
self.Problem.setObjectiveCoefficients(
inputDict={i: 1.0}, logging=loggingIntermediateSteps)
self.Problem.solveLP(logging=loggingIntermediateSteps)
if self.Problem.Solved:
min = self.Problem.SolutionValues[i]
self.Problem.setObjectiveCoefficients(
inputDict={i: -1.0}, logging=loggingIntermediateSteps)
self.Problem.solveLP(logging=loggingIntermediateSteps)
if self.Problem.Solved:
max = self.Problem.SolutionValues[i]
out.update({i: {'Min': min, 'Max': max}})
self.LogBook.addEntry(
'Feasible-range of {} determined to be between {} and {}.'.format(i, min, max))
return(out)
def ParetoFront(self, variable_X, variable_Y, N=10, sign_VY='max', loggingIntermediateSteps=False):
"""
Determine Pareto front of two model variables.
Parameters
----------
variable_X : str
ID of variable, representing the X-coordinate of the Pareto-front
variable_Y : str
ID of variable, representing the Y-coordinate of the Pareto-front
N : int
Number of intervals within the feasible range of variable_X.
Default-value=10.
sign_VY : str
'max': variable_Y is maximised
'min': variable_Y is minimised
Returns
-------
Pandas DataFrame with columns named after the two input variables
and 'N' rows. Each row represents an interval on the Pareto front.
Entries on each row are the X and Y coordinate on the Pareto front,
representing the values of the two variables.
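
        Example (sketch; the variable IDs are placeholders and depend on the loaded model):
            front = sim.ParetoFront('R_reactionA', 'R_reactionB', N=20, sign_VY='max')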
"""
if variable_X not in self.Problem.LP.col_names:
print('Chosen Element not among problem variables')
return
if variable_Y not in self.Problem.LP.col_names:
print('Chosen Element not among problem variables')
return
FR = self.FeasibleRange(variable_X)
cMin = FR[variable_X]['Min']
cMax = FR[variable_X]['Max']
concentrations = [float(cMin+(cMax-cMin)*i/N) for i in range(N+1)]
Out = pandas.DataFrame(columns=[variable_X, variable_Y])
oldLB = self.Problem.getLB(variable_X)
oldUB = self.Problem.getUB(variable_X)
iteration = -1
for conc in concentrations:
iteration += 1
self.Problem.setLB(inputDict={variable_X: conc}, logging=loggingIntermediateSteps)
self.Problem.setUB(inputDict={variable_X: conc}, logging=loggingIntermediateSteps)
self.Problem.clearObjective(logging=loggingIntermediateSteps)
if sign_VY == 'max':
self.Problem.setObjectiveCoefficients(
inputDict={variable_Y: -1}, logging=loggingIntermediateSteps)
if sign_VY == 'min':
self.Problem.setObjectiveCoefficients(
inputDict={variable_Y: 1}, logging=loggingIntermediateSteps)
self.Problem.solveLP(logging=loggingIntermediateSteps)
if self.Problem.Solved:
max = abs(self.Problem.ObjectiveValue)
else:
max = numpy.nan
self.Problem.setLB(inputDict=oldLB, logging=loggingIntermediateSteps)
self.Problem.setUB(inputDict=oldUB, logging=loggingIntermediateSteps)
Out.loc[iteration, variable_X] = conc
Out.loc[iteration, variable_Y] = max
self.LogBook.addEntry(
'Pareto-front between {} and {} determined.'.format(variable_X, variable_Y))
return(Out)
### !!! Docstring ###
def buildFBA(self, type='classic', objective='classic', maintenanceToBM=False):
"""
Derives and constructs FBA-problem from RBA-problem and stores it under attribute 'FBA'.
Parameters
----------
        type : str
            'classic': reduce the RBA problem to metabolite mass-balance rows and reaction columns (enzyme and machinery columns are dropped).
            'parsi': keep enzyme-related rows/columns when reducing the problem.
        objective : str
            'classic': keep the original objective coefficients (the column 'R_maintenance_atp' is removed).
            'targets': add a biomass column 'R_BIOMASS_targetsRBA' derived from the RBA target vector.
        maintenanceToBM : bool
            Only used with objective='targets': if True, the maintenance-ATP requirement is folded into the added biomass column.
"""
RBAproblem = self.Problem.LP
A = RBAproblem.A.toarray()
if type == 'classic':
Cols2remove = list(set([RBAproblem.col_names.index(i) for i in RBAproblem.col_names if not i.startswith('R_') and not i.startswith('M_') and not i.endswith('_synthesis')]
+ [RBAproblem.col_names.index(i) for i in RBAproblem.col_names if '_duplicate_' in i]
+ [RBAproblem.col_names.index(i) for i in RBAproblem.col_names if 'enzyme' in i]))
Rows2remove = [RBAproblem.row_names.index(
i) for i in RBAproblem.row_names if not i.startswith('M_')]
elif type == 'parsi':
Cols2remove = list(set([RBAproblem.col_names.index(i) for i in RBAproblem.col_names if not i.startswith(
'R_') and not i.startswith('M_') and not i.endswith('_synthesis')]+[RBAproblem.col_names.index(i) for i in RBAproblem.col_names if '_duplicate_' in i]))
Rows2remove = [RBAproblem.row_names.index(
i) for i in RBAproblem.row_names if not i.startswith('R_') and not i.startswith('M_')]
if objective == 'classic':
if 'R_maintenance_atp' in RBAproblem.col_names:
Cols2remove.append(RBAproblem.col_names.index('R_maintenance_atp'))
Anew = numpy.delete(A, Cols2remove, axis=1)
col_namesNew = list(numpy.delete(RBAproblem.col_names, Cols2remove))
LBnew = numpy.delete(RBAproblem.LB, Cols2remove)
UBnew = numpy.delete(RBAproblem.UB, Cols2remove)
fNew = numpy.delete(RBAproblem.f, Cols2remove)
Anew2 = numpy.delete(Anew, Rows2remove, axis=0)
row_namesNew = list(numpy.delete(RBAproblem.row_names, Rows2remove))
row_signsNew = list(numpy.delete(RBAproblem.row_signs, Rows2remove))
bNew = numpy.delete(RBAproblem.b, Rows2remove)
trnaInds = [i for i in range(len(row_namesNew)) if row_namesNew[i].startswith(
'M_') and 'trna' in row_namesNew[i]]
# bNew[trnaInds] = 0
if objective == 'targets':
col_namesNew.append('R_BIOMASS_targetsRBA')
LBnew = numpy.append(LBnew, 0)
UBnew = numpy.append(UBnew, 10000)
fNew = numpy.append(fNew, 0)
BMrxnCol = numpy.ones((len(row_namesNew), 1))
BMrxnCol[:, 0] = bNew
if maintenanceToBM:
MaintenanceTarget = LBnew[col_namesNew.index('R_maintenance_atp')]
BMrxnCol[row_namesNew.index('M_atp_c')] += MaintenanceTarget
BMrxnCol[row_namesNew.index('M_h2o_c')] += MaintenanceTarget
BMrxnCol[row_namesNew.index('M_adp_c')] -= MaintenanceTarget
BMrxnCol[row_namesNew.index('M_pi_c')] -= MaintenanceTarget
BMrxnCol[row_namesNew.index('M_h_c')] -= MaintenanceTarget
LBnew[col_namesNew.index('R_maintenance_atp')] = 0
Anew2 = numpy.append(Anew2, -BMrxnCol, axis=1)
bNew = numpy.array([0]*Anew2.shape[0])
Matrix1 = RBA_Matrix()
Matrix1.A = scipy.sparse.coo_matrix(Anew2)
Matrix1.b = bNew
Matrix1.LB = LBnew
Matrix1.UB = UBnew
Matrix1.row_signs = row_signsNew
Matrix1.row_names = row_namesNew
Matrix1.col_names = col_namesNew
Matrix1.f = fNew
if type == 'classic':
Matrix1.b = numpy.array([0]*len(row_signsNew))
LP1 = RBA_LP()
LP1.loadMatrix(Matrix1)
elif type == 'parsi':
MetaboliteRows = {i: Matrix1.row_names.index(
i) for i in Matrix1.row_names if i.startswith('M_')}
EnzymeCols = {i: Matrix1.col_names.index(
i) for i in Matrix1.col_names if i.startswith('R_') and '_enzyme' in i}
Matrix2 = RBA_Matrix()
Matrix2.A = scipy.sparse.coo_matrix(numpy.zeros((len(MetaboliteRows), len(EnzymeCols))))
Matrix2.b = numpy.array(Matrix1.b[list(MetaboliteRows.values())])
Matrix2.LB = numpy.array(Matrix1.LB[list(EnzymeCols.values())])
Matrix2.UB = numpy.array(Matrix1.UB[list(EnzymeCols.values())])
Matrix2.f = numpy.array(Matrix1.f[list(EnzymeCols.values())])
Matrix2.row_signs = [Matrix1.row_signs[i] for i in list(MetaboliteRows.values())]
Matrix2.row_names = list(MetaboliteRows.keys())
Matrix2.col_names = list(EnzymeCols.keys())
Matrix2.mapIndices()
Matrix1.b = numpy.array([0]*len(bNew))
LP1 = RBA_LP()
LP1.loadMatrix(Matrix1)
LP1.updateMatrix(Matrix2)
self.FBA = RBA_FBA(LP1)
def findMinMediumConcentration(self, metabolite, precision=0.00001, max=100, recording=False, loggingIntermediateSteps=False):
"""
Applies dichotomy-search to find the minimal feasible concentration of
growth-substrate in medium, at a previously set growth-rate.
Parameters
----------
metabolite : str
ID of metabolite in medium.
precision : float
            Numeric precision with which the minimum is approximated.
Default : 0.00001
max : float
            Defines the highest concentration to be screened for.
Default=100
recording : bool
Records intermediate feasible solutions
while approaching the minimum concentration.
Default : False
Returns
-------
minimum feasible growth-substrate concentration as float.
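
        Example (sketch; the metabolite ID is a placeholder and depends on the loaded medium):
            c_min = sim.findMinMediumConcentration(metabolite='M_glc', precision=0.001)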
"""
minConc = 0.0
maxConc = max
testConc = minConc
iteration = 0
oldConc = self.Medium[metabolite]
while (maxConc - minConc) > precision:
self.setMedium(changes={metabolite: testConc})
self.Problem.solveLP(logging=loggingIntermediateSteps)
if self.Problem.Solved:
iteration += 1
if recording:
run_name = 'Dichotomy_'+metabolite+'_' + \
str(testConc)+'_iteration_'+str(iteration)
self.recordResults(run_name)
maxConc = testConc
else:
minConc = testConc
testConc = numpy.mean([maxConc, minConc])
self.LogBook.addEntry(
'Minimal required {} concentration found to be: {}.'.format(metabolite, maxConc))
self.setMedium(changes={metabolite: oldConc})
return(maxConc)
def addProtein(self, input):
"""
Adds representation of individual proteins to problem.
Parameters
----------
input : dict or str
If input is str it has to be the ID of a protein in the model.
            Then this protein is added to the problem and creates:
One constraint named Protein_'ID' (equality).
One variable named TotalLevel_'ID' representing the total amount.
One variable named Free_'ID'_'respectiveCompartment', this
represents the fraction of the protein not assuming any function.
It however consumes resources for synthesis (precursors and processes),
which are the same as defined in the model files.
            And takes up space in the compartment as specified in the model-files
for the protein.
If input is dict it has to have two keys; 'ID' and 'UnusedProteinFraction'.
            By specifying this input one can define that the unused fraction of the protein
can also reside in other compartments and which processes it requires.
The value to 'ID' is the ID of a protein in the model.
The value to 'UnusedProteinFraction' is another dictionary.
This can have several keys which must be model-compartments.
For each of the keys the value is a dict holding IDs of model-processes as Keys
and process requirements as Values (numerical).
This specifies which processes each of the compartment-species of the protein
requires.
This generates the same constraint and TotalLevel-variable as with the simple input,
however a variable representing each of the compartment-species for the unused fraction
is added and incorporates the specific process requirements.
            E.g: input = {'ID': 'proteinA',
                          'UnusedProteinFraction':{'Cytoplasm': {'Translation': 100, 'Folding': 10},
                                                   'Membrane': {'Translation': 100, 'Folding': 20, 'Secretion': 100}
                                                   }
                          }
This adds 'proteinA' to the model, where the unused fraction can reside either in
the Cytoplasm or the Membrane. However while the cytosolic-species only requires the
processes 'Translation' and 'Folding'; the membrane-bound species also requires 'Secretion'
and occupies more folding capacity.
Then the constraint 'Protein_proteinA' is added and the 3 variables
'TotalLevel_proteinA', 'Free_proteinA_Cytoplasm' and 'Free_proteinA_Membrane'.
"""
if type(input) is str:
input = {'ID': input}
if 'ID' not in list(input.keys()):
print('Error, no protein ID provided')
return
if input['ID'] not in list(self.ModelStructure.ProteinInfo.Elements.keys()):
print('Error, protein not in model')
return
if 'UnusedProteinFraction' not in list(input.keys()):
input.update({'UnusedProteinFraction':
{self.ModelStructure.ProteinInfo.Elements[input['ID']]['Compartment']:
self.ModelStructure.ProteinInfo.Elements[input['ID']]['ProcessRequirements']}})
self.LogBook.addEntry('Protein {} added with specifications {}.'.format(
input['ID'], str(json.dumps(input))))
Muindexlist = []
## Building RBA_Matrix-object for new constraint-row, representing protein ##
UsedProtein = RBA_Matrix()
UsedProtein.A = scipy.sparse.coo_matrix(
buildUsedProteinConstraint(Controler=self, protein=input['ID']))
UsedProtein.b = numpy.array([float(0)])
UsedProtein.f = numpy.array(self.Problem.LP.f)
UsedProtein.LB = numpy.array(self.Problem.LP.LB)
UsedProtein.UB = numpy.array(self.Problem.LP.UB)
UsedProtein.row_signs = ['E']
UsedProtein.row_names = ['Protein_'+input['ID']]
UsedProtein.col_names = self.Problem.LP.col_names
## Add used protein row to problem ##
self.Problem.LP.addMatrix(matrix=UsedProtein)
## Add used protein row to reference Matrix (Mu == 1) ##
self.Problem.MuOneMatrix.addMatrix(matrix=UsedProtein)
## Building RBA_Matrix-object for new variable-col, representing total level of protein ##
TotProtein = RBA_Matrix()
TotProtein.A = scipy.sparse.coo_matrix(numpy.array(numpy.matrix(
numpy.array([float(0)]*self.Problem.LP.A.shape[0]+[float(-1)])).transpose()))
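        ## The column holds a single -1 in the new 'Protein_<ID>' row, so that equality row ##
        ## enforces used + free protein - TotalLevel = 0 ##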
TotProtein.f = numpy.array([float(0)])
TotProtein.LB = numpy.array([float(0)])
TotProtein.UB = numpy.array([float(100000.0)])
TotProtein.b = numpy.array(list(self.Problem.LP.b)+list(UsedProtein.b))
TotProtein.row_signs = self.Problem.LP.row_signs+UsedProtein.row_signs
TotProtein.row_names = self.Problem.LP.row_names+UsedProtein.row_names
TotProtein.col_names = ['TotalLevel_'+input['ID']]
## Add total protein col to problem ##
self.Problem.LP.addMatrix(matrix=TotProtein)
## Add total protein col to reference Matrix (Mu == 1) ##
self.Problem.MuOneMatrix.addMatrix(matrix=TotProtein)
## Building RBA_Matrix-object for new variable-col,##
## representing each compartment-species of the protein ##
for comp_species in list(input['UnusedProteinFraction'].keys()):
## Initiate RBA_Matrix object##
UnusedProtein = RBA_Matrix()
UnusedProtein.col_names = ['Free_'+input['ID']+'_'+comp_species]
## Extract required processes for protein and the respective demand ##
ProcIDs = list(input['UnusedProteinFraction'][comp_species].keys())
Preq = list(input['UnusedProteinFraction'][comp_species].values())
ProcessCost = dict(
zip([self.ModelStructure.ProcessInfo.Elements[k]['ID'] for k in ProcIDs], Preq))
## Get required charged trna buildingblocks and their stoichiometry in protein ##
composition = self.ModelStructure.ProteinInfo.Elements[input['ID']]['AAcomposition']
## Extract the composition of charged trnas in terms of metabolic species ##
species = self.ModelStructure.ProcessInfo.Elements['Translation']['Components']
## Determine required metabolites and their stoichiometry in protein ##
MetaboliteCost = buildCompositionofUnusedProtein(
species=species, composition=composition)
            ## Assemble process and metabolite requirements into stoichiometric column vector ##
## And add to RBA_Matrix object ##
colToAdd = numpy.array(numpy.matrix(numpy.array(list(MetaboliteCost.values())+list(ProcessCost.values()) +
[float(1)]+[self.ModelStructure.ProteinInfo.Elements[input['ID']]['AAnumber']])).transpose())
UnusedProtein.A = scipy.sparse.coo_matrix(colToAdd)
## Add other information to RBA_Matrix object ##
UnusedProtein.row_names = list(MetaboliteCost.keys())+[str(pc+'_capacity') for pc in list(
ProcessCost.keys())]+['Protein_'+input['ID']]+[str(comp_species + '_density')]
UnusedProtein.b = numpy.zeros(len(UnusedProtein.row_names))
UnusedProtein.row_signs = ['E']*len(UnusedProtein.row_names)
UnusedProtein.LB = numpy.array([float(0)])
UnusedProtein.UB = numpy.array([float(100000.0)])
UnusedProtein.f = numpy.array([float(0)])
self.ProteinDilutionIndices = list(
zip(list(MetaboliteCost.keys()), UnusedProtein.col_names*len(list(MetaboliteCost.keys()))))
## Add free protein col to problem ##
self.Problem.LP.addMatrix(matrix=UnusedProtein)
## Add free protein col to reference Matrix (Mu == 1) ##
self.Problem.MuOneMatrix.addMatrix(matrix=UnusedProtein)
## Find coefficients of unused protein column, subject to dilution (Metabolite and Process cost) ##
## And add them to MuDepIndices_A ##
nonZeroEntries = numpy.where(UnusedProtein.A != 0)[0]
self.Problem.MuDepIndices_A += [(UnusedProtein.row_names[i], UnusedProtein.col_names[0]) for i in nonZeroEntries if UnusedProtein.row_names[i]
!= 'Protein_'+input['ID'] and UnusedProtein.row_names[i] not in self.Problem.CompartmentDensities]
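        ## Re-apply the current growth-rate so the newly registered Mu-dependent coefficients ##
        ## are rescaled consistently with the rest of the problem ##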
self.setMu(self.Problem.Mu)
## !!! ##
def eukaryoticDensities(self, totalAA=3.1, CompartmentRelationships=True, CompartmentComponents=False):
Compartments = ['n', 'mIM', 'vM', 'mIMS', 'm', 'erM', 'mOM', 'x', 'c', 'cM', 'gM']
Signs = ['L', 'L', 'L', 'L', 'L', 'L', 'L', 'L', 'L', 'L', 'L']
totalAA = 3.1*0.71
m_mIM = 0.66
m_mIMS = 2
m_mOM = 8
DensityIndices = [self.Problem.LP.row_names.index(
i) for i in self.Problem.CompartmentDensities]
A = self.Problem.LP.A.toarray()
A[numpy.min(DensityIndices):numpy.max(DensityIndices)+1, :] /= totalAA
self.Problem.LP.A = scipy.sparse.coo_matrix(A)
A0 = self.Problem.MuOneMatrix.A.toarray()
A0[numpy.min(DensityIndices):numpy.max(DensityIndices)+1, :] /= totalAA
self.Problem.MuOneMatrix.A = scipy.sparse.coo_matrix(A0)
CompartmentMatrix = RBA_Matrix()
A = numpy.ones((len(Compartments)+1, len(Compartments)))
Eye = -numpy.eye(len(Compartments))
A[0:len(Compartments), :] = Eye
CompartmentMatrix.A = scipy.sparse.coo_matrix(A)
CompartmentMatrix.b = numpy.array([float(0)]*len(Compartments)+[float(1)])
CompartmentMatrix.f = numpy.array([float(0)]*len(Compartments))
CompartmentMatrix.LB = numpy.array([float(0)]*len(Compartments))
CompartmentMatrix.UB = numpy.array([float(1)]*len(Compartments))
CompartmentMatrix.row_signs = ['L']*len(Compartments)+['E']
# CompartmentMatrix.row_signs = ['E']*(len(Compartments)+1)
CompartmentMatrix.row_names = ['n_density', 'mIM_density', 'vM_density', 'mIMS_density', 'm_density',
'erM_density', 'mOM_density', 'x_density', 'cM_density', 'gM_density', 'c_density', 'TotalCapacity']
CompartmentMatrix.col_names = ['F_n', 'F_mIM', 'F_vM', 'F_mIMS',
'F_m', 'F_erM', 'F_mOM', 'F_x', 'F_cM', 'F_gM', 'F_c']
# CompartmentMatrix.row_signs[CompartmentMatrix.col_names.index('F_m')]='E'
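        # Fixed ratios tie the mitochondrial inner-membrane, intermembrane-space and outer-membrane
        # fractions (F_mIM, F_mIMS, F_mOM) to the matrix fraction F_m via m_mIM, m_mIMS and m_mOM.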
if CompartmentRelationships:
Anew = numpy.zeros((A.shape[0]+3, A.shape[1]))
Anew[0:A.shape[0], :] = A
CompartmentMatrix.row_names += ['m_mIM', 'm_mIMS', 'm_mOM']
CompartmentMatrix.row_signs += ['E', 'E', 'E']
CompartmentMatrix.b = numpy.array(list(CompartmentMatrix.b)+[float(0)]*3)
Anew[CompartmentMatrix.row_names.index(
'm_mIM'), CompartmentMatrix.col_names.index('F_m')] = float(1)
Anew[CompartmentMatrix.row_names.index(
'm_mIMS'), CompartmentMatrix.col_names.index('F_m')] = float(1)
Anew[CompartmentMatrix.row_names.index(
'm_mOM'), CompartmentMatrix.col_names.index('F_m')] = float(1)
Anew[CompartmentMatrix.row_names.index(
'm_mIM'), CompartmentMatrix.col_names.index('F_mIM')] = -m_mIM
Anew[CompartmentMatrix.row_names.index(
'm_mIMS'), CompartmentMatrix.col_names.index('F_mIMS')] = -m_mIMS
Anew[CompartmentMatrix.row_names.index(
'm_mOM'), CompartmentMatrix.col_names.index('F_mOM')] = -m_mOM
CompartmentMatrix.A = scipy.sparse.coo_matrix(Anew)
self.Problem.LP.addMatrix(matrix=CompartmentMatrix)
self.Problem.MuOneMatrix.addMatrix(matrix=CompartmentMatrix)
if CompartmentComponents:
AlipidsA = numpy.zeros((7, len(Compartments)))
Alipids = RBA_Matrix()
Alipids.col_names = ['F_n', 'F_mIM', 'F_vM', 'F_mIMS',
'F_m', 'F_erM', 'F_mOM', 'F_x', 'F_cM', 'F_gM', 'F_c']
Alipids.row_names = ['M_pc_SC_c', 'M_pe_SC_c', 'M_ptd1ino_SC_c',
'M_ps_SC_c', 'M_clpn_SC_m', 'M_pa_SC_c', 'M_ergst_c']
Alipids.row_signs += ['E', 'E', 'E', 'E', 'E', 'E', 'E']
Alipids.b = numpy.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
Alipids.LB = numpy.array([float(0)]*len(Compartments))
Alipids.UB = numpy.array([float(1)]*len(Compartments))
Alipids.f = numpy.array([float(0)]*len(Compartments))
AlipidsA[Alipids.row_names.index('M_pc_SC_c'), Alipids.col_names.index(
'F_mIM')] = -0.0000883*totalAA
AlipidsA[Alipids.row_names.index('M_pe_SC_c'), Alipids.col_names.index(
'F_mIM')] = -0.00005852*totalAA
AlipidsA[Alipids.row_names.index('M_ptd1ino_SC_c'),
Alipids.col_names.index('F_mIM')] = -0.00003377*totalAA
AlipidsA[Alipids.row_names.index('M_ps_SC_c'), Alipids.col_names.index(
'F_mIM')] = -0.00000873*totalAA
AlipidsA[Alipids.row_names.index('M_clpn_SC_m'),
Alipids.col_names.index('F_mIM')] = -0.00002*totalAA
AlipidsA[Alipids.row_names.index('M_pa_SC_c'), Alipids.col_names.index(
'F_mIM')] = -0.0000039*totalAA
AlipidsA[Alipids.row_names.index(
'M_ergst_c'), Alipids.col_names.index('F_mIM')] = -0.008547*totalAA
self.Problem.MuDepIndices_A += [('M_pc_SC_c', 'F_mIM'), ('M_pe_SC_c', 'F_mIM'), ('M_ptd1ino_SC_c', 'F_mIM'),
('M_ps_SC_c', 'F_mIM'), ('M_clpn_SC_m', 'F_mIM'), ('M_pa_SC_c', 'F_mIM'), ('M_ergst_c', 'F_mIM')]
AlipidsA[Alipids.row_names.index(
'M_pc_SC_c'), Alipids.col_names.index('F_mOM')] = -0.000636*totalAA
AlipidsA[Alipids.row_names.index('M_pe_SC_c'), Alipids.col_names.index(
'F_mOM')] = -0.0004822*totalAA
AlipidsA[Alipids.row_names.index('M_ptd1ino_SC_c'),
Alipids.col_names.index('F_mOM')] = -0.0001289*totalAA
AlipidsA[Alipids.row_names.index('M_ps_SC_c'), Alipids.col_names.index(
'F_mOM')] = -0.0000167*totalAA
AlipidsA[Alipids.row_names.index('M_clpn_SC_m'), Alipids.col_names.index(
'F_mOM')] = -0.00004467*totalAA
AlipidsA[Alipids.row_names.index('M_pa_SC_c'), Alipids.col_names.index(
'F_mOM')] = -0.0000696*totalAA
self.Problem.MuDepIndices_A += [('M_pc_SC_c', 'F_mOM'), ('M_pe_SC_c', 'F_mOM'), ('M_ptd1ino_SC_c',
'F_mOM'), ('M_ps_SC_c', 'F_mOM'), ('M_clpn_SC_m', 'F_mOM'), ('M_pa_SC_c', 'F_mOM')]
Alipids.A = scipy.sparse.coo_matrix(AlipidsA)
Alipids.mapIndices()
self.Problem.LP.updateMatrix(Alipids, Ainds=[('M_pc_SC_c', 'F_mIM'), ('M_pe_SC_c', 'F_mIM'), ('M_ptd1ino_SC_c', 'F_mIM'), ('M_ps_SC_c', 'F_mIM'), ('M_clpn_SC_m', 'F_mIM'), ('M_pa_SC_c', 'F_mIM'), (
'M_ergst_c', 'F_mIM'), ('M_pc_SC_c', 'F_mOM'), ('M_pe_SC_c', 'F_mOM'), ('M_ptd1ino_SC_c', 'F_mOM'), ('M_ps_SC_c', 'F_mOM'), ('M_clpn_SC_m', 'F_mOM'), ('M_pa_SC_c', 'F_mOM')])
self.Problem.MuOneMatrix.updateMatrix(Alipids, Ainds=[('M_pc_SC_c', 'F_mIM'), ('M_pe_SC_c', 'F_mIM'), ('M_ptd1ino_SC_c', 'F_mIM'), ('M_ps_SC_c', 'F_mIM'), ('M_clpn_SC_m', 'F_mIM'), (
'M_pa_SC_c', 'F_mIM'), ('M_ergst_c', 'F_mIM'), ('M_pc_SC_c', 'F_mOM'), ('M_pe_SC_c', 'F_mOM'), ('M_ptd1ino_SC_c', 'F_mOM'), ('M_ps_SC_c', 'F_mOM'), ('M_clpn_SC_m', 'F_mOM'), ('M_pa_SC_c', 'F_mOM')])
## !!! ##
def eukaryoticDensities2(self, totalAA=3.1, CompartmentRelationships=True, CompartmentComponents=False):
Compartments = ['n', 'mIM', 'vM', 'mIMS', 'm', 'erM', 'mOM', 'x', 'c', 'cM', 'gM']
totalAA = 3.1*0.69
m_mIM = 1.11
m_mIMS = 0.7
m_mOM = 7.2
DensityIndices = [self.Problem.LP.row_names.index(
i) for i in self.Problem.CompartmentDensities]
A = self.Problem.LP.A.toarray()
A[numpy.min(DensityIndices):numpy.max(DensityIndices)+1, :] /= totalAA
self.Problem.LP.A = scipy.sparse.coo_matrix(A)
A0 = self.Problem.MuOneMatrix.A.toarray()
A0[numpy.min(DensityIndices):numpy.max(DensityIndices)+1, :] /= totalAA
self.Problem.MuOneMatrix.A = scipy.sparse.coo_matrix(A0)
CompartmentMatrix = RBA_Matrix()
A = numpy.ones((len(Compartments)+1, len(Compartments)))
Eye = -numpy.eye(len(Compartments))
A[0:len(Compartments), :] = Eye
CompartmentMatrix.A = scipy.sparse.coo_matrix(A)
CompartmentMatrix.b = numpy.array([float(0)]*len(Compartments)+[float(1)])
CompartmentMatrix.f = numpy.array([float(0)]*len(Compartments))
CompartmentMatrix.LB = numpy.array([float(0)]*len(Compartments))
CompartmentMatrix.UB = numpy.array([float(1)]*len(Compartments))
CompartmentMatrix.row_signs = ['L']*(len(Compartments)+1)
# CompartmentMatrix.row_signs = ['E']*(len(Compartments)+1)
CompartmentMatrix.row_names = ['n_density', 'mIM_density', 'vM_density', 'mIMS_density', 'm_density',
'erM_density', 'mOM_density', 'x_density', 'cM_density', 'gM_density', 'c_density', 'TotalCapacity']
CompartmentMatrix.col_names = ['F_n', 'F_mIM', 'F_vM', 'F_mIMS',
'F_m', 'F_erM', 'F_mOM', 'F_x', 'F_cM', 'F_gM', 'F_c']
# CompartmentMatrix.row_signs[CompartmentMatrix.col_names.index('F_m')]='E'
if CompartmentRelationships:
Anew = numpy.zeros((A.shape[0]+3, A.shape[1]))
Anew[0:A.shape[0], :] = A
CompartmentMatrix.row_names += ['m_mIM', 'm_mIMS', 'm_mOM']
CompartmentMatrix.row_signs += ['E', 'E', 'E']
CompartmentMatrix.b = numpy.array(list(CompartmentMatrix.b)+[float(0)]*3)
Anew[CompartmentMatrix.row_names.index(
'm_mIM'), CompartmentMatrix.col_names.index('F_m')] = float(1)
Anew[CompartmentMatrix.row_names.index(
'm_mIMS'), CompartmentMatrix.col_names.index('F_m')] = float(1)
Anew[CompartmentMatrix.row_names.index(
'm_mOM'), CompartmentMatrix.col_names.index('F_m')] = float(1)
Anew[CompartmentMatrix.row_names.index(
'm_mIM'), CompartmentMatrix.col_names.index('F_mIM')] = -m_mIM
Anew[CompartmentMatrix.row_names.index(
'm_mIMS'), CompartmentMatrix.col_names.index('F_mIMS')] = -m_mIMS
Anew[CompartmentMatrix.row_names.index(
'm_mOM'), CompartmentMatrix.col_names.index('F_mOM')] = -m_mOM
CompartmentMatrix.A = scipy.sparse.coo_matrix(Anew)
self.Problem.LP.addMatrix(matrix=CompartmentMatrix)
self.Problem.MuOneMatrix.addMatrix(matrix=CompartmentMatrix)
if CompartmentComponents:
PC_mIM = 0.0000883
PE_mIM = 0.00005852
PI_mIM = 0.00003377
PS_mIM = 0.00000873
CL_mIM = 0.00002
PA_mIM = 0.0000039
ES_mIM = 0.008547
PC_mOM = 0.000636
PE_mOM = 0.0004822
PI_mOM = 0.0001289
PS_mOM = 0.0000167
CL_mOM = 0.00004467
PA_mOM = 0.0000696
ES_mOM = 0.0
ConstraintMatrix = numpy.zeros((7, 0))
Alipids = RBA_Matrix()
Alipids.col_names = []
Alipids.row_names = ['M_pc_SC_c', 'M_pe_SC_c', 'M_ptd1ino_SC_c',
'M_ps_SC_c', 'M_clpn_SC_m', 'M_pa_SC_c', 'M_ergst_c']
Alipids.row_signs = [
self.Problem.LP.row_signs[self.Problem.LP.row_names.index(i)] for i in Alipids.row_names]
Alipids.b = numpy.array(
[self.Problem.LP.b[self.Problem.LP.row_names.index(i)] for i in Alipids.row_names])
Alipids.LB = numpy.array([])
Alipids.UB = numpy.array([])
Alipids.f = numpy.array([])
MudepIndices = []
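            # One lipid-demand column per process machinery and per enzyme: each is charged with
            # membrane lipids according to the compartment of its protein (sub)units, scaled by
            # protein length and stoichiometry and normalised by totalAA.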
for pc in self.ModelStructure.ProcessInfo.Elements.keys():
if self.ModelStructure.ProcessInfo.Elements[pc]['ID'] not in self.Problem.LP.col_names:
continue
ConstraintMatrixNew = numpy.zeros(
(ConstraintMatrix.shape[0], ConstraintMatrix.shape[1]+1))
ConstraintMatrixNew[:, 0:ConstraintMatrix.shape[1]] = ConstraintMatrix
Alipids.col_names.append(self.ModelStructure.ProcessInfo.Elements[pc]['ID'])
# Alipids.LB = numpy.array(list(Alipids.LB).append(list(self.Problem.LP.LB)[
# self.Problem.LP.col_names.index(self.ModelStructure.ProcessInfo.Elements[pc]['ID'])]))
# Alipids.UB = numpy.array(list(Alipids.UB).append(list(self.Problem.LP.UB)[
# self.Problem.LP.col_names.index(self.ModelStructure.ProcessInfo.Elements[pc]['ID'])]))
# Alipids.f = numpy.array(list(Alipids.f).append(list(self.Problem.LP.f)[
# self.Problem.LP.col_names.index(self.ModelStructure.ProcessInfo.Elements[pc]['ID'])]))
Alipids.LB = numpy.concatenate([Alipids.LB, numpy.array(
list(self.Problem.LP.LB)[self.Problem.LP.col_names.index(self.ModelStructure.ProcessInfo.Elements[pc]['ID'])])])
Alipids.UB = numpy.concatenate([Alipids.UB, numpy.array(
list(self.Problem.LP.UB)[self.Problem.LP.col_names.index(self.ModelStructure.ProcessInfo.Elements[pc]['ID'])])])
Alipids.f = numpy.concatenate([Alipids.f, numpy.array(
list(self.Problem.LP.f)[self.Problem.LP.col_names.index(self.ModelStructure.ProcessInfo.Elements[pc]['ID'])])])
for p in self.ModelStructure.ProcessInfo.Elements[pc]['Composition'].keys():
lE = sum(list(self.ModelStructure.ProteinInfo.Elements[p]['AAcomposition'].values(
)))*self.ModelStructure.ProcessInfo.Elements[pc]['Composition'][p]
if self.ModelStructure.ProteinInfo.Elements[p]['Compartment'] == 'mOM':
ConstraintMatrixNew[Alipids.col_names.index(
'M_pc_SC_c'), ConstraintMatrix.shape[1]] -= PC_mOM*lE/totalAA
ConstraintMatrixNew[Alipids.col_names.index(
'M_pe_SC_c'), ConstraintMatrix.shape[1]] -= PE_mOM*lE/totalAA
ConstraintMatrixNew[Alipids.col_names.index(
'M_ptd1ino_SC_c'), ConstraintMatrix.shape[1]] -= PI_mOM*lE/totalAA
ConstraintMatrixNew[Alipids.col_names.index(
'M_ps_SC_c'), ConstraintMatrix.shape[1]] -= PS_mOM*lE/totalAA
ConstraintMatrixNew[Alipids.col_names.index(
'M_clpn_SC_m'), ConstraintMatrix.shape[1]] -= CL_mOM*lE/totalAA
ConstraintMatrixNew[Alipids.col_names.index(
'M_pa_SC_c'), ConstraintMatrix.shape[1]] -= PA_mOM*lE/totalAA
                        MudepIndices += [('M_pc_SC_c', self.ModelStructure.ProcessInfo.Elements[pc]['ID']),
                                         ('M_pe_SC_c', self.ModelStructure.ProcessInfo.Elements[pc]['ID']),
                                         ('M_ptd1ino_SC_c', self.ModelStructure.ProcessInfo.Elements[pc]['ID']),
                                         ('M_ps_SC_c', self.ModelStructure.ProcessInfo.Elements[pc]['ID']),
                                         ('M_clpn_SC_m', self.ModelStructure.ProcessInfo.Elements[pc]['ID']),
                                         ('M_pa_SC_c', self.ModelStructure.ProcessInfo.Elements[pc]['ID'])]
elif self.ModelStructure.ProteinInfo.Elements[p]['Compartment'] == 'mIM':
ConstraintMatrixNew[Alipids.col_names.index(
'M_pc_SC_c'), ConstraintMatrix.shape[1]] -= PC_mIM*lE/totalAA
ConstraintMatrixNew[Alipids.col_names.index(
'M_pe_SC_c'), ConstraintMatrix.shape[1]] -= PE_mIM*lE/totalAA
ConstraintMatrixNew[Alipids.col_names.index(
'M_ptd1ino_SC_c'), ConstraintMatrix.shape[1]] -= PI_mIM*lE/totalAA
ConstraintMatrixNew[Alipids.col_names.index(
'M_ps_SC_c'), ConstraintMatrix.shape[1]] -= PS_mIM*lE/totalAA
ConstraintMatrixNew[Alipids.col_names.index(
'M_clpn_SC_m'), ConstraintMatrix.shape[1]] -= CL_mIM*lE/totalAA
ConstraintMatrixNew[Alipids.col_names.index(
'M_pa_SC_c'), ConstraintMatrix.shape[1]] -= PA_mIM*lE/totalAA
ConstraintMatrixNew[Alipids.col_names.index(
'M_ergst_c'), ConstraintMatrix.shape[1]] -= ES_mIM*lE/totalAA
                        MudepIndices += [('M_pc_SC_c', self.ModelStructure.ProcessInfo.Elements[pc]['ID']),
                                         ('M_pe_SC_c', self.ModelStructure.ProcessInfo.Elements[pc]['ID']),
                                         ('M_ptd1ino_SC_c', self.ModelStructure.ProcessInfo.Elements[pc]['ID']),
                                         ('M_ps_SC_c', self.ModelStructure.ProcessInfo.Elements[pc]['ID']),
                                         ('M_clpn_SC_m', self.ModelStructure.ProcessInfo.Elements[pc]['ID']),
                                         ('M_pa_SC_c', self.ModelStructure.ProcessInfo.Elements[pc]['ID']),
                                         ('M_ergst_c', self.ModelStructure.ProcessInfo.Elements[pc]['ID'])]
ConstraintMatrix = ConstraintMatrixNew
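            # Same treatment for enzymes: one lipid-demand column per enzyme, based on the
            # compartments of its subunits.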
for e in self.ModelStructure.EnzymeInfo.Elements.keys():
if e not in self.Problem.LP.col_names:
continue
ConstraintMatrixNew = numpy.zeros(
(ConstraintMatrix.shape[0], ConstraintMatrix.shape[1]+1))
ConstraintMatrixNew[:, 0:ConstraintMatrix.shape[1]] = ConstraintMatrix
Alipids.col_names.append(e)
# xnew = list(self.Problem.LP.LB)[self.Problem.LP.col_names.index(e)]
Alipids.LB = numpy.concatenate([Alipids.LB, numpy.array(
list(self.Problem.LP.LB)[self.Problem.LP.col_names.index(e)])])
Alipids.UB = numpy.concatenate([Alipids.UB, numpy.array(
list(self.Problem.LP.UB)[self.Problem.LP.col_names.index(e)])])
Alipids.f = numpy.concatenate([Alipids.f, numpy.array(
list(self.Problem.LP.f)[self.Problem.LP.col_names.index(e)])])
# Alipids.LB = numpy.array(list(Alipids.LB).append(xnew))
# Alipids.UB = numpy.array(list(Alipids.UB).append(
# list(self.Problem.LP.UB)[self.Problem.LP.col_names.index(e)]))
# Alipids.f = numpy.array(list(Alipids.f).append(
# list(self.Problem.LP.f)[self.Problem.LP.col_names.index(e)]))
for p in self.ModelStructure.EnzymeInfo.Elements[e]['Subunits'].keys():
lE = sum(
list(self.ModelStructure.ProteinInfo.Elements[p]['AAcomposition'].values()))
lE *= self.ModelStructure.EnzymeInfo.Elements[e]['Subunits'][p]['StochFac']
if self.ModelStructure.ProteinInfo.Elements[p]['Compartment'] == 'mOM':
ConstraintMatrixNew[Alipids.col_names.index(
'M_pc_SC_c'), ConstraintMatrix.shape[1]] -= PC_mOM*lE/totalAA
ConstraintMatrixNew[Alipids.col_names.index(
'M_pe_SC_c'), ConstraintMatrix.shape[1]] -= PE_mOM*lE/totalAA
ConstraintMatrixNew[Alipids.col_names.index(
'M_ptd1ino_SC_c'), ConstraintMatrix.shape[1]] -= PI_mOM*lE/totalAA
ConstraintMatrixNew[Alipids.col_names.index(
'M_ps_SC_c'), ConstraintMatrix.shape[1]] -= PS_mOM*lE/totalAA
ConstraintMatrixNew[Alipids.col_names.index(
'M_clpn_SC_m'), ConstraintMatrix.shape[1]] -= CL_mOM*lE/totalAA
ConstraintMatrixNew[Alipids.col_names.index(
'M_pa_SC_c'), ConstraintMatrix.shape[1]] -= PA_mOM*lE/totalAA
                        MudepIndices += [('M_pc_SC_c', e), ('M_pe_SC_c', e), ('M_ptd1ino_SC_c', e),
                                         ('M_ps_SC_c', e), ('M_clpn_SC_m', e), ('M_pa_SC_c', e)]
elif self.ModelStructure.ProteinInfo.Elements[p]['Compartment'] == 'mIM':
ConstraintMatrixNew[Alipids.col_names.index(
'M_pc_SC_c'), ConstraintMatrix.shape[1]] -= PC_mIM*lE/totalAA
ConstraintMatrixNew[Alipids.col_names.index(
'M_pe_SC_c'), ConstraintMatrix.shape[1]] -= PE_mIM*lE/totalAA
ConstraintMatrixNew[Alipids.col_names.index(
'M_ptd1ino_SC_c'), ConstraintMatrix.shape[1]] -= PI_mIM*lE/totalAA
ConstraintMatrixNew[Alipids.col_names.index(
'M_ps_SC_c'), ConstraintMatrix.shape[1]] -= PS_mIM*lE/totalAA
ConstraintMatrixNew[Alipids.col_names.index(
'M_clpn_SC_m'), ConstraintMatrix.shape[1]] -= CL_mIM*lE/totalAA
ConstraintMatrixNew[Alipids.col_names.index(
'M_pa_SC_c'), ConstraintMatrix.shape[1]] -= PA_mIM*lE/totalAA
ConstraintMatrixNew[Alipids.col_names.index(
'M_ergst_c'), ConstraintMatrix.shape[1]] -= ES_mIM*lE/totalAA
                        MudepIndices += [('M_pc_SC_c', e), ('M_pe_SC_c', e), ('M_ptd1ino_SC_c', e),
                                         ('M_ps_SC_c', e), ('M_clpn_SC_m', e), ('M_pa_SC_c', e),
                                         ('M_ergst_c', e)]
ConstraintMatrix = ConstraintMatrixNew
self.Problem.MuDepIndices_A += MudepIndices
Alipids.A = scipy.sparse.coo_matrix(ConstraintMatrix)
Alipids.mapIndices()
self.Problem.LP.updateMatrix(Alipids, Ainds=MudepIndices)
            self.Problem.MuOneMatrix.updateMatrix(Alipids, Ainds=MudepIndices)
## !!! ##
def eukaryoticDensities3(self, totalAA=3.1, VolumeFraction=False, CompartmentRelationships=True, CompartmentComponents=False):
Compartments = ['n', 'mIM', 'vM', 'mIMS', 'm', 'erM', 'mOM', 'x', 'c', 'cM', 'gM']
totalAA = 3.1*0.91
m_mIM = 0.66
m_mIMS = 2
m_mOM = 8
DensityIndices = [self.Problem.LP.row_names.index(
i) for i in self.Problem.CompartmentDensities]
A = self.Problem.LP.A.toarray()
# A[numpy.min(DensityIndices):numpy.max(DensityIndices)+1, :] /= totalAA
# self.Problem.LP.A = scipy.sparse.coo_matrix(A)
A0 = self.Problem.MuOneMatrix.A.toarray()
# A0[numpy.min(DensityIndices):numpy.max(DensityIndices)+1, :] /= totalAA
# self.Problem.MuOneMatrix.A = scipy.sparse.coo_matrix(A0)
OccupationMatrix = RBA_Matrix()
# A = numpy.ones((len(Compartments)+1, len(Compartments)))
A = -numpy.eye(len(Compartments))
# Eye = -numpy.eye(len(Compartments))
# A[0:len(Compartments), :] = Eye
OccupationMatrix.A = scipy.sparse.coo_matrix(A)
# OccupationMatrix.b = numpy.array([-0.209*totalAA]+[float(0)]*(len(Compartments)-1)+[float(totalAA)])
OccupationMatrix.b = numpy.array([-0.209*totalAA]+[float(0)]*(len(Compartments)-1))
OccupationMatrix.f = numpy.array([float(0)]*len(Compartments))
OccupationMatrix.LB = numpy.array([float(0)]*len(Compartments))
OccupationMatrix.UB = numpy.array([float(totalAA)]*len(Compartments))
# OccupationMatrix.row_signs = ['E']*(len(Compartments))+['L']
OccupationMatrix.row_signs = ['E']*(len(Compartments))
# OccupationMatrix.row_names = ['n_density', 'mIM_density', 'vM_density', 'mIMS_density', 'm_density',
# 'erM_density', 'mOM_density', 'x_density', 'cM_density', 'gM_density', 'c_density', 'TotalProtein']
OccupationMatrix.row_names = ['n_density', 'mIM_density', 'vM_density', 'mIMS_density',
'm_density', 'erM_density', 'mOM_density', 'x_density', 'cM_density', 'gM_density', 'c_density']
OccupationMatrix.col_names = ['O_n', 'O_mIM', 'O_vM', 'O_mIMS',
'O_m', 'O_erM', 'O_mOM', 'O_x', 'O_cM', 'O_gM', 'O_c']
# CompartmentMatrix.row_signs[CompartmentMatrix.col_names.index('F_m')]='E'
OccupationMatrix.mapIndices()
self.Problem.LP.addMatrix(matrix=OccupationMatrix)
self.Problem.MuOneMatrix.addMatrix(matrix=OccupationMatrix)
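        # O_* columns represent absolute compartment occupation (bounded by totalAA), while the
        # F_* columns introduced below are relative volume fractions that sum to one.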
CompartmentMatrix = RBA_Matrix()
if VolumeFraction:
A = numpy.eye(len(Compartments))*5/float(totalAA)
else:
A = numpy.eye(len(Compartments))/float(totalAA)
CompartmentMatrix.A = scipy.sparse.coo_matrix(A)
CompartmentMatrix.b = numpy.array([float(0)]*len(Compartments))
CompartmentMatrix.f = numpy.array([float(0)]*len(Compartments))
CompartmentMatrix.LB = numpy.array([float(0)]*len(Compartments))
CompartmentMatrix.UB = numpy.array([float(totalAA)]*len(Compartments))
CompartmentMatrix.row_signs = ['L']*(len(Compartments))
# CompartmentMatrix.row_signs = ['E']*(len(Compartments))
CompartmentMatrix.row_names = ['n_volume', 'mIM_volume', 'vM_volume', 'mIMS_volume',
'm_volume', 'erM_volume', 'mOM_volume', 'x_volume', 'cM_volume', 'gM_volume', 'c_volume']
CompartmentMatrix.col_names = ['O_n', 'O_mIM', 'O_vM', 'O_mIMS',
'O_m', 'O_erM', 'O_mOM', 'O_x', 'O_cM', 'O_gM', 'O_c']
CompartmentMatrix.mapIndices()
self.Problem.LP.addMatrix(matrix=CompartmentMatrix)
self.Problem.MuOneMatrix.addMatrix(matrix=CompartmentMatrix)
VolumeMatrix = RBA_Matrix()
A = numpy.ones((len(Compartments)+1, len(Compartments)))
Eye = -numpy.eye(len(Compartments))
A[0:len(Compartments), :] = Eye
# A[len(Compartments), [1, 5, 6, 8, 9]] = 0
# A[len(Compartments), 8] = 0
VolumeMatrix.A = scipy.sparse.coo_matrix(A)
VolumeMatrix.b = numpy.array([float(0)]*len(Compartments)+[float(1)])
VolumeMatrix.f = numpy.array([float(0)]*len(Compartments))
VolumeMatrix.LB = numpy.array([float(0)]*len(Compartments))
VolumeMatrix.UB = numpy.array([float(1)]*len(Compartments))
VolumeMatrix.row_signs = ['L']*(len(Compartments))+['E']
# VolumeMatrix.row_signs = ['E']*(len(Compartments))+['E']
VolumeMatrix.row_names = ['n_volume', 'mIM_volume', 'vM_volume', 'mIMS_volume', 'm_volume',
'erM_volume', 'mOM_volume', 'x_volume', 'cM_volume', 'gM_volume', 'c_volume', 'TotalVolume']
VolumeMatrix.col_names = ['F_n', 'F_mIM', 'F_vM', 'F_mIMS',
'F_m', 'F_erM', 'F_mOM', 'F_x', 'F_cM', 'F_gM', 'F_c']
if not CompartmentRelationships:
VolumeMatrix.mapIndices()
self.Problem.LP.addMatrix(matrix=VolumeMatrix)
self.Problem.MuOneMatrix.addMatrix(matrix=VolumeMatrix)
if CompartmentRelationships:
Anew = numpy.zeros((A.shape[0]+3, A.shape[1]))
Anew[0:A.shape[0], :] = A
VolumeMatrix.row_names += ['m_mIM', 'm_mIMS', 'm_mOM']
VolumeMatrix.row_signs += ['E', 'E', 'E']
VolumeMatrix.b = numpy.array(list(VolumeMatrix.b)+[float(0)]*3)
Anew[VolumeMatrix.row_names.index(
'm_mIM'), VolumeMatrix.col_names.index('F_m')] = float(1)
Anew[VolumeMatrix.row_names.index(
'm_mIMS'), VolumeMatrix.col_names.index('F_m')] = float(1)
Anew[VolumeMatrix.row_names.index(
'm_mOM'), VolumeMatrix.col_names.index('F_m')] = float(1)
Anew[VolumeMatrix.row_names.index(
'm_mIM'), VolumeMatrix.col_names.index('F_mIM')] = -m_mIM
Anew[VolumeMatrix.row_names.index(
'm_mIMS'), VolumeMatrix.col_names.index('F_mIMS')] = -m_mIMS
Anew[VolumeMatrix.row_names.index(
'm_mOM'), VolumeMatrix.col_names.index('F_mOM')] = -m_mOM
VolumeMatrix.A = scipy.sparse.coo_matrix(Anew)
VolumeMatrix.mapIndices()
self.Problem.LP.addMatrix(matrix=VolumeMatrix)
self.Problem.MuOneMatrix.addMatrix(matrix=VolumeMatrix)
if CompartmentComponents:
PC_mIM = 0.0000883
PE_mIM = 0.00005852
PI_mIM = 0.00003377
PS_mIM = 0.00000873
CL_mIM = 0.00002
PA_mIM = 0.0000039
ES_mIM = 0.008547
PC_mOM = 0.000636
PE_mOM = 0.0004822
PI_mOM = 0.0001289
PS_mOM = 0.0000167
CL_mOM = 0.00004467
PA_mOM = 0.0000696
ES_mOM = 0.0
PC_vM = 0.0003635
PE_vM = 0.4156
PI_vM = 0.0001297
PS_vM = 0.00003435
CL_vM = 0.0000068
PA_vM = 0.0000186
ES_vM = 0.0142
PC_n = 0.000055
PE_n = 0.000035
PI_n = 0.000017
PS_n = 0.0000072
CL_n = 0.0
PA_n = 0.0000031
ES_n = 0.0086
PC_gM = 0.00043
PE_gM = 0.00044
PI_gM = 0.00041
PS_gM = 0.0
CL_gM = 0.00022
PA_gM = 0.0
ES_gM = 0.0
PC_n = 0.0
PE_n = 0.0
PI_n = 0.0
PS_n = 0.0
CL_n = 0.0
PA_n = 0.0
ES_n = 0.0
PC_gM = 0.0
PE_gM = 0.0
PI_gM = 0.0
PS_gM = 0.0
CL_gM = 0.0
PA_gM = 0.0
ES_gM = 0.0
PC_vM = 0.0
PE_vM = 0.0
PI_vM = 0.0
PS_vM = 0.0
CL_vM = 0.0
PA_vM = 0.0
ES_vM = 0.0
PC_mIM = 0.0
PE_mIM = 0.0
PI_mIM = 0.0
PS_mIM = 0.0
CL_mIM = 0.0
PA_mIM = 0.0
ES_mIM = 0.0
PC_mOM = 0.0
PE_mOM = 0.0
PI_mOM = 0.0
PS_mOM = 0.0
CL_mOM = 0.0
PA_mOM = 0.0
ES_mOM = 0.0
Alipids = RBA_Matrix()
Alipids.col_names = ['F_mIM', 'F_mOM', 'F_vM', 'F_n', 'F_gM']
Alipids.row_names = ['M_pc_SC_c', 'M_pe_SC_c', 'M_ptd1ino_SC_c',
'M_ps_SC_c', 'M_clpn_SC_m', 'M_pa_SC_c', 'M_ergst_c']
Alipids.row_signs = [
self.Problem.LP.row_signs[self.Problem.LP.row_names.index(i)] for i in Alipids.row_names]
Alipids.b = numpy.array(
[self.Problem.LP.b[self.Problem.LP.row_names.index(i)] for i in Alipids.row_names])
Alipids.LB = numpy.array([0, 0, 0, 0, 0])
Alipids.UB = numpy.array([1, 1, 1, 1, 1])
Alipids.f = numpy.array([0, 0, 0, 0, 0])
LipidMatrix = numpy.zeros((7, 5))
LipidMatrix[Alipids.row_names.index(
'M_pc_SC_c'), Alipids.col_names.index('F_mIM')] = PC_mIM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_pe_SC_c'), Alipids.col_names.index('F_mIM')] = PE_mIM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_ptd1ino_SC_c'), Alipids.col_names.index('F_mIM')] = PI_mIM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_ps_SC_c'), Alipids.col_names.index('F_mIM')] = PS_mIM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_clpn_SC_m'), Alipids.col_names.index('F_mIM')] = CL_mIM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_pa_SC_c'), Alipids.col_names.index('F_mIM')] = PA_mIM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_ergst_c'), Alipids.col_names.index('F_mIM')] = ES_mIM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_pc_SC_c'), Alipids.col_names.index('F_mOM')] = PC_mOM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_pe_SC_c'), Alipids.col_names.index('F_mOM')] = PE_mOM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_ptd1ino_SC_c'), Alipids.col_names.index('F_mOM')] = PI_mOM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_ps_SC_c'), Alipids.col_names.index('F_mOM')] = PS_mOM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_clpn_SC_m'), Alipids.col_names.index('F_mOM')] = CL_mOM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_pa_SC_c'), Alipids.col_names.index('F_mOM')] = PA_mOM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_ergst_c'), Alipids.col_names.index('F_mOM')] = ES_mOM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_pc_SC_c'), Alipids.col_names.index('F_vM')] = PC_vM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_pe_SC_c'), Alipids.col_names.index('F_vM')] = PE_vM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_ptd1ino_SC_c'), Alipids.col_names.index('F_vM')] = PI_vM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_ps_SC_c'), Alipids.col_names.index('F_vM')] = PS_vM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_clpn_SC_m'), Alipids.col_names.index('F_vM')] = CL_vM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_pa_SC_c'), Alipids.col_names.index('F_vM')] = PA_vM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_ergst_c'), Alipids.col_names.index('F_vM')] = ES_vM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_pc_SC_c'), Alipids.col_names.index('F_n')] = PC_n/totalAA
LipidMatrix[Alipids.row_names.index(
'M_pe_SC_c'), Alipids.col_names.index('F_n')] = PE_n/totalAA
LipidMatrix[Alipids.row_names.index(
'M_ptd1ino_SC_c'), Alipids.col_names.index('F_n')] = PI_n/totalAA
LipidMatrix[Alipids.row_names.index(
'M_ps_SC_c'), Alipids.col_names.index('F_n')] = PS_n/totalAA
LipidMatrix[Alipids.row_names.index(
'M_clpn_SC_m'), Alipids.col_names.index('F_n')] = CL_n/totalAA
LipidMatrix[Alipids.row_names.index(
'M_pa_SC_c'), Alipids.col_names.index('F_n')] = PA_n/totalAA
LipidMatrix[Alipids.row_names.index(
'M_ergst_c'), Alipids.col_names.index('F_n')] = ES_n/totalAA
LipidMatrix[Alipids.row_names.index(
'M_pc_SC_c'), Alipids.col_names.index('F_gM')] = PC_gM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_pe_SC_c'), Alipids.col_names.index('F_gM')] = PE_gM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_ptd1ino_SC_c'), Alipids.col_names.index('F_gM')] = PI_gM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_ps_SC_c'), Alipids.col_names.index('F_gM')] = PS_gM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_clpn_SC_m'), Alipids.col_names.index('F_gM')] = CL_gM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_pa_SC_c'), Alipids.col_names.index('F_gM')] = PA_gM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_ergst_c'), Alipids.col_names.index('F_gM')] = ES_gM/totalAA
MudepIndices = [('M_pc_SC_c', i) for i in Alipids.col_names]+[('M_pe_SC_c', i) for i in Alipids.col_names]+[('M_ptd1ino_SC_c', i) for i in Alipids.col_names]+[('M_ps_SC_c', i)
for i in Alipids.col_names]+[('M_clpn_SC_m', i) for i in Alipids.col_names]+[('M_pa_SC_c', i) for i in Alipids.col_names]+[('M_ergst_c', i) for i in Alipids.col_names]
self.Problem.MuDepIndices_A += MudepIndices
Alipids.A = scipy.sparse.coo_matrix(LipidMatrix)
Alipids.mapIndices()
self.Problem.LP.updateMatrix(Alipids, Ainds=MudepIndices)
self.Problem.MuOneMatrix.updateMatrix(Alipids, Ainds=MudepIndices)
## !!! ##
def eukaryoticDensities4(self, CompartmentRelationships=True):
Compartments = ['n', 'mIM', 'vM', 'mIMS', 'm', 'erM', 'mOM', 'x', 'c', 'cM', 'gM']
totalAA = 3.1*0.91
DensityIndices = [self.Problem.LP.row_names.index(
i) for i in self.Problem.CompartmentDensities]
A = self.Problem.LP.A.toarray()
A0 = self.Problem.MuOneMatrix.A.toarray()
OccupationMatrix = RBA_Matrix()
A = numpy.ones((len(Compartments)+1, len(Compartments)))
Eye = -numpy.eye(len(Compartments))
A[0:len(Compartments), :] = Eye
OccupationMatrix.b = numpy.array(list([float(0)]*len(Compartments))+[totalAA])
OccupationMatrix.f = numpy.array([float(0)]*len(Compartments))
OccupationMatrix.LB = numpy.array([float(0)]*len(Compartments))
OccupationMatrix.UB = numpy.array([float(totalAA)]*len(Compartments))
OccupationMatrix.row_signs = ['E']*(len(Compartments)+1)
OccupationMatrix.row_names = ['n_density', 'mIM_density', 'vM_density', 'mIMS_density',
'm_density', 'erM_density', 'mOM_density', 'x_density', 'cM_density', 'gM_density', 'c_density', 'O_total']
OccupationMatrix.col_names = ['O_n', 'O_mIM', 'O_vM', 'O_mIMS',
'O_m', 'O_erM', 'O_mOM', 'O_x', 'O_cM', 'O_gM', 'O_c']
if CompartmentRelationships:
m_mIM = 0.5
m_mIMS = 1
m_mOM = 5
Anew = numpy.zeros((A.shape[0]+3, A.shape[1]))
Anew[0:A.shape[0], :] = A
OccupationMatrix.row_names += ['m_mIM', 'm_mIMS', 'm_mOM']
OccupationMatrix.row_signs += ['E', 'E', 'E']
OccupationMatrix.b = numpy.array(list(OccupationMatrix.b)+[float(0)]*3)
Anew[OccupationMatrix.row_names.index(
'm_mIM'), OccupationMatrix.col_names.index('O_m')] = float(1)
Anew[OccupationMatrix.row_names.index(
'm_mIMS'), OccupationMatrix.col_names.index('O_m')] = float(1)
Anew[OccupationMatrix.row_names.index(
'm_mOM'), OccupationMatrix.col_names.index('O_m')] = float(1)
Anew[OccupationMatrix.row_names.index(
'm_mIM'), OccupationMatrix.col_names.index('O_mIM')] = -m_mIM
Anew[OccupationMatrix.row_names.index(
'm_mIMS'), OccupationMatrix.col_names.index('O_mIMS')] = -m_mIMS
Anew[OccupationMatrix.row_names.index(
'm_mOM'), OccupationMatrix.col_names.index('O_mOM')] = -m_mOM
OccupationMatrix.A = scipy.sparse.coo_matrix(Anew)
else:
OccupationMatrix.A = scipy.sparse.coo_matrix(A)
OccupationMatrix.mapIndices()
self.Problem.LP.addMatrix(matrix=OccupationMatrix)
self.Problem.MuOneMatrix.addMatrix(matrix=OccupationMatrix)
# {'Index':{'Param1':'+','Param2':'+','Param2':'-'}}
#Type: 'Sum'#
# {'Index':'Param1'}
self.Problem.MuDependencies['FromParameters']['b'].update(
{'n_density': 'AAres_PG_nucleus_DNA'})
self.Problem.MuDependencies['FromParameters']['b'].update(
{'O_total': {'Equation': 'amino_acid_concentration_total - AAres_PG_secreted_Euk', 'Variables': ['amino_acid_concentration_total', 'AAres_PG_secreted_Euk']}})
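        # The compartment-density right-hand sides are now either parameterised above or constant,
        # so their matrix-derived Mu-dependencies are dropped below.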
self.Problem.MuDependencies['FromMatrix']['b'].remove('n_density')
self.Problem.MuDependencies['FromMatrix']['b'].remove('mIM_density')
self.Problem.MuDependencies['FromMatrix']['b'].remove('vM_density')
self.Problem.MuDependencies['FromMatrix']['b'].remove('mIMS_density')
self.Problem.MuDependencies['FromMatrix']['b'].remove('m_density')
self.Problem.MuDependencies['FromMatrix']['b'].remove('erM_density')
self.Problem.MuDependencies['FromMatrix']['b'].remove('mOM_density')
self.Problem.MuDependencies['FromMatrix']['b'].remove('x_density')
self.Problem.MuDependencies['FromMatrix']['b'].remove('cM_density')
self.Problem.MuDependencies['FromMatrix']['b'].remove('gM_density')
self.Problem.MuDependencies['FromMatrix']['b'].remove('c_density')
## !!! ##
def eukaryoticDensities_calibration(self, CompartmentRelationships=False, mitoProportions={}, amino_acid_concentration_total='amino_acid_concentration_total'):
Compartments = ['n', 'mIM', 'vM', 'mIMS', 'm', 'erM', 'mOM', 'x', 'c', 'cM', 'gM']
totalAA_parameter = amino_acid_concentration_total
totalAA = 3.1
DensityIndices = [self.Problem.LP.row_names.index(
i) for i in self.Problem.CompartmentDensities]
A = self.Problem.LP.A.toarray()
A0 = self.Problem.MuOneMatrix.A.toarray()
OccupationMatrix = RBA_Matrix()
A = numpy.ones((len(Compartments)+1, len(Compartments)))
Eye = -numpy.eye(len(Compartments))
A[0:len(Compartments), :] = Eye
OccupationMatrix.b = numpy.array(list([float(0)]*len(Compartments))+[totalAA])
OccupationMatrix.f = numpy.array([float(0)]*len(Compartments))
OccupationMatrix.LB = numpy.array([float(0)]*len(Compartments))
OccupationMatrix.UB = numpy.array([float(totalAA)]*len(Compartments))
OccupationMatrix.row_signs = ['E']*(len(Compartments)+1)
OccupationMatrix.row_names = ['n_density', 'mIM_density', 'vM_density', 'mIMS_density',
'm_density', 'erM_density', 'mOM_density', 'x_density', 'cM_density', 'gM_density', 'c_density', 'O_total']
OccupationMatrix.col_names = ['O_n', 'O_mIM', 'O_vM', 'O_mIMS',
'O_m', 'O_erM', 'O_mOM', 'O_x', 'O_cM', 'O_gM', 'O_c']
if CompartmentRelationships:
if len(list(mitoProportions.keys())) == 3:
m_mIM = mitoProportions['m_mIM']
m_mIMS = mitoProportions['m_mIMS']
m_mOM = mitoProportions['m_mOM']
Anew = numpy.zeros((A.shape[0]+3, A.shape[1]))
Anew[0:A.shape[0], :] = A
OccupationMatrix.row_names += ['m_mIM', 'm_mIMS', 'm_mOM']
OccupationMatrix.row_signs += ['E', 'E', 'E']
OccupationMatrix.b = numpy.array(list(OccupationMatrix.b)+[float(0)]*3)
Anew[OccupationMatrix.row_names.index(
'm_mIM'), OccupationMatrix.col_names.index('O_m')] = float(1)
Anew[OccupationMatrix.row_names.index(
'm_mIMS'), OccupationMatrix.col_names.index('O_m')] = float(1)
Anew[OccupationMatrix.row_names.index(
'm_mOM'), OccupationMatrix.col_names.index('O_m')] = float(1)
Anew[OccupationMatrix.row_names.index(
'm_mIM'), OccupationMatrix.col_names.index('O_mIM')] = -m_mIM
Anew[OccupationMatrix.row_names.index(
'm_mIMS'), OccupationMatrix.col_names.index('O_mIMS')] = -m_mIMS
Anew[OccupationMatrix.row_names.index(
'm_mOM'), OccupationMatrix.col_names.index('O_mOM')] = -m_mOM
OccupationMatrix.A = scipy.sparse.coo_matrix(Anew)
else:
OccupationMatrix.A = scipy.sparse.coo_matrix(A)
OccupationMatrix.mapIndices()
self.Problem.LP.addMatrix(matrix=OccupationMatrix)
self.Problem.MuOneMatrix.addMatrix(matrix=OccupationMatrix)
# {'Index':{'Param1':'+','Param2':'+','Param2':'-'}}
#Type: 'Sum'#
# {'Index':'Param1'}
self.Problem.MuDependencies['FromParameters']['b'].update(
{'n_density': {'Equation': '-nonenzymatic_proteins_n/inverse_average_protein_length', 'Variables': ['nonenzymatic_proteins_n', 'inverse_average_protein_length']}})
self.Problem.MuDependencies['FromParameters']['b'].update({'mIM_density': {
'Equation': '-nonenzymatic_proteins_mIM/inverse_average_protein_length', 'Variables': ['nonenzymatic_proteins_mIM', 'inverse_average_protein_length']}})
self.Problem.MuDependencies['FromParameters']['b'].update({'vM_density': {
'Equation': '-nonenzymatic_proteins_vM/inverse_average_protein_length', 'Variables': ['nonenzymatic_proteins_vM', 'inverse_average_protein_length']}})
self.Problem.MuDependencies['FromParameters']['b'].update({'mIMS_density': {
'Equation': '-nonenzymatic_proteins_mIMS/inverse_average_protein_length', 'Variables': ['nonenzymatic_proteins_mIMS', 'inverse_average_protein_length']}})
self.Problem.MuDependencies['FromParameters']['b'].update(
{'m_density': {'Equation': '-nonenzymatic_proteins_m/inverse_average_protein_length', 'Variables': ['nonenzymatic_proteins_m', 'inverse_average_protein_length']}})
self.Problem.MuDependencies['FromParameters']['b'].update({'erM_density': {
'Equation': '-nonenzymatic_proteins_erM/inverse_average_protein_length', 'Variables': ['nonenzymatic_proteins_erM', 'inverse_average_protein_length']}})
self.Problem.MuDependencies['FromParameters']['b'].update({'mOM_density': {
'Equation': '-nonenzymatic_proteins_mOM/inverse_average_protein_length', 'Variables': ['nonenzymatic_proteins_mOM', 'inverse_average_protein_length']}})
self.Problem.MuDependencies['FromParameters']['b'].update(
{'x_density': {'Equation': '-nonenzymatic_proteins_x/inverse_average_protein_length', 'Variables': ['nonenzymatic_proteins_x', 'inverse_average_protein_length']}})
self.Problem.MuDependencies['FromParameters']['b'].update({'cM_density': {
'Equation': '-nonenzymatic_proteins_cM/inverse_average_protein_length', 'Variables': ['nonenzymatic_proteins_cM', 'inverse_average_protein_length']}})
self.Problem.MuDependencies['FromParameters']['b'].update({'gM_density': {
'Equation': '-nonenzymatic_proteins_gM/inverse_average_protein_length', 'Variables': ['nonenzymatic_proteins_gM', 'inverse_average_protein_length']}})
self.Problem.MuDependencies['FromParameters']['b'].update(
{'c_density': {'Equation': '-nonenzymatic_proteins_c/inverse_average_protein_length', 'Variables': ['nonenzymatic_proteins_c', 'inverse_average_protein_length']}})
self.Problem.MuDependencies['FromParameters']['b'].update({'O_total': {'Equation': '{} - nonenzymatic_proteins_Secreted/inverse_average_protein_length'.format(totalAA_parameter), 'Variables': [
totalAA_parameter, 'nonenzymatic_proteins_Secreted', 'inverse_average_protein_length']}})
# !!! deal with hardcoded parameter_names... !!!
def estimate_specific_Kapps(self, proteomicsData, flux_bounds, mu, biomass_function=None, target_biomass_function=True, parsimonious_fba=True):
"""
Parameters
----------
proteomicsData : pandas.DataFrame (in mmol/gDW)
flux_bounds : pandas.DataFrame (in mmol/(gDW*h))
mu : float (in 1/h)
biomass_function : str
target_biomass_function : bool
        parsimonious_fba : bool
"""
from scipy.stats.mstats import gmean
old_model = copy.deepcopy(self.model)
for i in self.model.targets.target_groups._elements_by_id['translation_targets'].concentrations._elements:
if i.species == 'average_protein_c':
new_agg = rba.xml.parameters.Aggregate(id_='total_protein', type_='multiplication')
new_agg.function_references.append(rba.xml.parameters.FunctionReference(
function='amino_acid_concentration_total'))
new_agg.function_references.append(rba.xml.parameters.FunctionReference(
function='inverse_average_protein_length'))
self.model.parameters.aggregates._elements.append(new_agg)
i.value = 'total_protein'
else:
self.model.targets.target_groups._elements_by_id['translation_targets'].concentrations._elements.remove(
i)
for i in self.model.targets.target_groups._elements_by_id['transcription_targets'].concentrations._elements:
if i.species == 'mrna':
new_agg = rba.xml.parameters.Aggregate(id_='total_rna', type_='multiplication')
new_agg.function_references.append(rba.xml.parameters.FunctionReference(
function='RNA_massfraction_CarbonLimitation'))
new_agg.function_references.append(
rba.xml.parameters.FunctionReference(function='RNA_inversemillimolarweight'))
self.model.parameters.aggregates._elements.append(new_agg)
i.value = 'total_rna'
else:
self.model.targets.target_groups._elements_by_id['transcription_targets'].concentrations._elements.remove(
i)
self.rebuild_from_model()
self.setMedium(self.Medium)
self.addExchangeReactions()
self.setMu(mu)
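        # Build the FBA problem around either the RBA-derived target biomass reaction or a
        # user-supplied classic biomass function.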
if target_biomass_function:
self.buildFBA(objective='targets', maintenanceToBM=True)
BMfunction = 'R_BIOMASS_targetsRBA'
else:
self.buildFBA(objective='classic', maintenanceToBM=False)
BMfunction = biomass_function
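        # Close the exchange reactions of medium components whose concentration is set to zero.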
for j in [i for i in self.Medium.keys() if self.Medium[i] == 0]:
Exrxn = 'R_EX_'+j.split('M_')[-1]+'_e'
self.FBA.setUB({Exrxn: 0})
rxn_LBs = {}
rxn_UBs = {}
for rx in flux_bounds['Reaction_ID']:
lb = flux_bounds.loc[flux_bounds['Reaction_ID'] == rx, 'LB'].values[0]
ub = flux_bounds.loc[flux_bounds['Reaction_ID'] == rx, 'UB'].values[0]
if not pandas.isna(lb):
rxn_LBs.update({rx: lb})
if not | pandas.isna(ub) | pandas.isna |
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import scipy.stats as stats
from datetime import datetime, timedelta
import math
import os
import logging
from pathlib import Path
import ast
class bpeace2():
'''
Class used to process bpeace2 data (Spring 2020 into Summer 2020)
'''
def __init__(self):
self.study = 'bpeace2'
self.id_crossover = pd.read_excel('../../data/raw/bpeace2/admin/id_crossover.xlsx',sheet_name='id')
self.beacon_id = pd.read_excel('../../data/raw/bpeace2/admin/id_crossover.xlsx',sheet_name='beacon')
def move_to_purgatory(self,path_to_file,path_to_destination):
'''
Moves problematic file to the purgatory data directory
Returns void
'''
print('\t\tMoving to purgatory...')
os.replace(path_to_file, path_to_destination)
def process_beacon(self, remove_extreme=True):
'''
Combines data from all sensors on all beacons
Returns True if able to save one dataframe that contains all the data at regular intervals in /data/processed directory
'''
# list of all beacons used in the study
beacon_list = [1,5,6,7,10,11,15,16,17,19,21,22,23,24,25,26,28,29,30,32,34,36,38,40,41,44,46,48]
beacons_folder='../../data/raw/bpeace2/beacon'
beacon_data_list = [] # list where each item will be a dataframe for a beacon
print('\nProcessing beacon data...')
for beacon in beacon_list:
# correcting the number since the values <10 have leading zero in directory
number = f'{beacon:02}'
print(f'\t{number}')
beacon_folder=f'{beacons_folder}/B{number}'
beacon_df = pd.DataFrame() # dataframe specific to the beacon
# getting other ids
beacon_crossover_info = self.id_crossover.loc[self.id_crossover['Beacon']==beacon].reset_index(drop=True)
beiwe = beacon_crossover_info['Beiwe'][0]
fitbit = beacon_crossover_info['Fitbit'][0]
redcap = beacon_crossover_info['REDCap'][0]
del beacon_crossover_info
def import_and_merge(csv_dir,number):
df_list = []
for file in os.listdir(csv_dir+'/'):
try:
                        # reading in raw data (csv for one day at a time) and appending it to the overall dataframe
day_df = pd.read_csv(f'{csv_dir}/{file}',
index_col='Timestamp',parse_dates=True,
infer_datetime_format=True)
df_list.append(day_df)
except Exception as inst:
# for whatever reason, some files have header issues - these are moved to purgatory to undergo triage
#print(f'{inst}; filename: {file}')
print(f'Issue encountered while importing {csv_dir}/{file}, skipping...')
self.move_to_purgatory(f'{csv_dir}/{file}',f'../../data/purgatory/{self.study}-B{number}-py3-{file}')
df = pd.concat(df_list).resample('5T').mean() # resampling to 5 minute intervals (raw data is at about 1 min)
return df
# Python3 Sensors
# ---------------
# dataframe for sensors using python3
py3_df = import_and_merge(f'{beacon_folder}/adafruit', number)
# Changing NO2 readings on beacons without NO2 readings to CO (wiring issues - see Hagen)
if number in ['28','29','32','34','36','38','40','46','30','44']:
print('\t\t\tNo NO2 sensor - removing values')
py3_df[['CO','T_CO','RH_CO']] = py3_df[['NO2','T_NO2','RH_NO2']]
py3_df[['NO2','T_NO2','RH_NO2']] = np.nan
py3_df['CO'] /= 1000 # converting ppb measurements to ppm
# Python2 Sensors
# ---------------
            # dataframe for sensors using python2
py2_df = import_and_merge(f'{beacon_folder}/sensirion', number)
# merging python2 and 3 sensor dataframes
beacon_df = py3_df.merge(right=py2_df,left_index=True,right_index=True,how='outer')
# getting relevant data only
beacon_info = self.beacon_id[self.beacon_id['Beiwe'] == beiwe]
start_date = beacon_info['start_date'].values[0]
end_date = beacon_info['end_date'].values[0]
beacon_df = beacon_df[start_date:end_date]
del beacon_info
# removing bad values from important variables
important_vars = ['TVOC','CO2','NO2','CO','PM_C_2p5','PM_C_10','T_NO2','T_CO','Temperature [C]','Lux','RH_NO2','RH_CO','Relative Humidity']
# variables that should never have anything less than zero
for var in ['CO2','T_NO2','T_CO','Temperature [C]','RH_NO2','RH_CO','Relative Humidity']:
beacon_df[var].mask(beacon_df[var] < 0, np.nan, inplace=True)
# variables that should never be less than a certain limit
for var, threshold in zip(['CO2','Lux'],[100,-1]):
beacon_df[var].mask(beacon_df[var] < threshold, np.nan, inplace=True)
if remove_extreme:
# removing extreme values (zscore greater than 2.5)
# for var in important_vars:
# beacon_df['z'] = abs(beacon_df[var] - beacon_df[var].mean()) / beacon_df[var].std(ddof=0)
# beacon_df.loc[beacon_df['z'] > 2.5, var] = np.nan
# beacon_df.drop(columns='z',inplace=True)
#IQR method
for var in important_vars:
# Computing IQR
Q1 = beacon_df[var].quantile(0.25)
Q3 = beacon_df[var].quantile(0.75)
IQR = Q3 - Q1
# Filtering Values between Q1-1.5IQR and Q3+1.5IQR
beacon_df[var].mask(beacon_df[var]<Q1-1.5*IQR,np.nan,inplace=True)
beacon_df[var].mask(beacon_df[var]>Q3+1.5*IQR,np.nan,inplace=True)
# adding columns for the pt details
beacon_df['Beacon'] = beacon
beacon_df['Beiwe'] = beiwe
beacon_df['Fitbit'] = fitbit
beacon_df['REDCap'] = redcap
beacon_data_list.append(beacon_df)
beacon_data = pd.concat(beacon_data_list)
# saving
try:
filename = f'../../data/processed/bpeace2-beacon-IQR.parquet'
if not remove_extreme:
filename = f'../../data/processed/bpeace2-beacon-with-extreme.parquet'
beacon_data.to_parquet(filename, compression='brotli')
except:
return False
return True
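    # Illustrative use (assumes the raw bpeace2 directory layout referenced above):
    #   processor = bpeace2()
    #   processor.process_beacon(remove_extreme=True)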
def process_gps(self, data_dir = '/Volumes/HEF_Dissertation_Research/utx000/extension/data/beiwe/gps/'):
'''
        Processes the raw gps data into one combined csv file for all participants and saves it into /data/processed/
All GPS data are recorded at 1-second intervals and stored in separate data files for every hour. The
data are combined into one dataframe per participant, downsampled to 5-minute intervals using the
mode value for those 5-minutes (after rounding coordinates to five decimal places), and combined into
a final dataframe that contains all participants' data.
        Returns True if able to process the data, False otherwise.
'''
print('\tProcessing gps data...')
gps_df = pd.DataFrame()
for participant in os.listdir(data_dir):
if len(participant) == 8: # checking to make sure we only look for participant directories
pid = participant
print(f'\t\tWorking for Participant: {pid}')
participant_df = pd.DataFrame() #
for file in os.listdir(f'{data_dir}{pid}/gps/'):
if file[-1] == 'v': # so we only import cs[v] files
try:
hourly_df = pd.read_csv(f'{data_dir}{pid}/gps/{file}',usecols=[1,2,3,4,5]) # all columns but UTC
except KeyError:
print(f'Problem with gps data for {file} for Participant {pid}')
self.move_to_purgatory(f'{data_dir}{pid}/gps/{file}',f'../../data/purgatory/{self.study}-{pid}-gps-{file}')
if len(hourly_df) > 0: # append to participant df if there were data for that hour
participant_df = participant_df.append(hourly_df,ignore_index=True)
# converting utc to cdt
participant_df['Time'] = pd.to_datetime(participant_df['UTC time']) - timedelta(hours=5)
participant_df.drop(['UTC time'],axis=1,inplace=True)
participant_df.set_index('Time',inplace=True)
# rounding gps and taking the mode for every 5-minutes
participant_df = round(participant_df,5)
participant_df = participant_df.resample('5T').apply({lambda x: stats.mode(x)[0]})
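                # The 5-minute mode keeps the most frequent rounded coordinate in each window,
                # which is robust to short bursts of GPS jitter.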
# converting values to numeric and removing NaN datapoints
participant_df.columns = ['Lat','Long','Alt','Accuracy']
for col in ['Lat','Long','Alt','Accuracy']:
participant_df[col] = pd.to_numeric(participant_df[col],errors='coerce')
participant_df.dropna(inplace=True)
# getting participant's home coordinates
home_coords = self.beacon_id.set_index('Beiwe')
home_lat = home_coords.loc[pid,'Lat']
home_long = home_coords.loc[pid,'Long']
# getting distance
R = 6.371*10**6 # radius of the earth in meters
                # equirectangular approximation: east-west offsets are scaled by cos(latitude), in radians
                participant_df['X_Distance'] = abs( R * (participant_df['Lat'] - home_lat) * math.pi / 180)
                participant_df['Y_Distance'] = abs( R * (participant_df['Long'] - home_long) * math.pi * math.cos(math.radians(home_lat)) / 180)
dist = []
for i in range(len(participant_df)):
dist.append(math.sqrt(math.pow(participant_df.iloc[i,-2],2) + math.pow(participant_df.iloc[i,-1],2)))
participant_df['Distance_Home'] = dist
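                # Distance_Home is the Euclidean norm of the planar offsets, i.e. an approximate
                # straight-line distance from home in meters.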
participant_df['Beiwe'] = pid
gps_df = gps_df.append(participant_df)
try:
gps_df.to_csv(f'../../data/processed/bpeace2-gps.csv')
except:
return False
return True
def process_weekly_surveys(self):
'''
Processes raw weekly survey answers. The survey IDs are:
- eQ2L3J08ChlsdSXXKOoOjyLJ: morning
        - 7TaT8zapOWO0xdtONnsY8CE0: evening
        - lh9veS0aSw2KfrfwSytYjxVr: weekly
        Returns True if able to save three dataframes for morning/evening/weekly survey data in /data/processed directory
'''
# defining some variables for ease of understanding
parent_dir = '../../data/raw/bpeace2/beiwe/survey_answers/'
morning_survey_id = 'eQ2L3J08ChlsdSXXKOoOjyLJ'
evening_survey_id = '7TaT8zapOWO0xdtONnsY8CE0'
weekly_survey_id = 'lh9veS0aSw2KfrfwSytYjxVr'
# defining the final dataframes to append to
evening_survey_df = pd.DataFrame()
morning_survey_df = pd.DataFrame()
weekly_survey_df = pd.DataFrame()
# Morning Survey Data
# -------------------
print('\tProcessing morning survey data...')
# looping through the participants and then all their data
for participant in os.listdir(parent_dir):
# making sure we don't read from any hidden directories/files
if len(participant) == 8:
pid = participant
participant_df = pd.DataFrame(columns=['ID','Content','Stress','Lonely','Sad','Energy','TST','SOL','NAW','Restful'])
for file in os.listdir(f'{parent_dir}{participant}/survey_answers/{morning_survey_id}/'):
# reading raw data
df = pd.read_csv(f'{parent_dir}{participant}/survey_answers/{morning_survey_id}/{file}')
# adding new row
try:
participant_df.loc[datetime.strptime(file[:-4],'%Y-%m-%d %H_%M_%S') - timedelta(hours=5)] = [pid,df.loc[4,'answer'],df.loc[5,'answer'],df.loc[6,'answer'],df.loc[7,'answer'],df.loc[8,'answer'],
df.loc[0,'answer'],df.loc[1,'answer'],df.loc[2,'answer'],df.loc[3,'answer']]
except KeyError:
print(f'\t\tProblem with morning survey {file} for Participant {pid} - Participant most likely did not answer a question')
self.move_to_purgatory(f'{parent_dir}{participant}/survey_answers/{morning_survey_id}/{file}',f'../../data/purgatory/{self.study}-{pid}-survey-morning-{file}')
# appending participant df to overall df
morning_survey_df = morning_survey_df.append(participant_df)
else:
print(f'\t\tDirectory {participant} is not valid')
# replacing string values with numeric
morning_survey_df.replace({'Not at all':0,'A little bit':1,'Quite a bit':2,'Very Much':3,
'Low energy':0,'Low Energy':0,'Somewhat low energy':1,'Neutral':2,'Somewhat high energy':3,'High energy':4,'High Energy':4,
'Not at all restful':0,'Slightly restful':1,'Somewhat restful':2,'Very restful':3,
'NO_ANSWER_SELECTED':-1,'NOT_PRESENTED':-1,'SKIP QUESTION':-1},inplace=True)
# fixing any string inputs outside the above range
morning_survey_df['NAW'] = pd.to_numeric(morning_survey_df['NAW'],errors='coerce')
# Evening Survey Data
# -------------------
print('\tProcessing evening survey data...')
for participant in os.listdir(parent_dir):
if len(participant) == 8:
pid = participant
# less columns
participant_df = pd.DataFrame(columns=['ID','Content','Stress','Lonely','Sad','Energy'])
for file in os.listdir(f'{parent_dir}{participant}/survey_answers/{evening_survey_id}/'):
df = pd.read_csv(f'{parent_dir}{participant}/survey_answers/{evening_survey_id}/{file}')
try:
participant_df.loc[datetime.strptime(file[:-4],'%Y-%m-%d %H_%M_%S') - timedelta(hours=5)] = [pid,df.loc[0,'answer'],df.loc[1,'answer'],df.loc[2,'answer'],df.loc[3,'answer'],df.loc[4,'answer']]
except KeyError:
print(f'\t\tProblem with evening survey {file} for Participant {pid} - Participant most likely did not answer a question')
self.move_to_purgatory(f'{parent_dir}{participant}/survey_answers/{evening_survey_id}/{file}',f'../../data/purgatory/{self.study}-{pid}-survey-evening-{file}')
evening_survey_df = evening_survey_df.append(participant_df)
else:
print(f'\t\tDirectory {participant} is not valid')
evening_survey_df.replace({'Not at all':0,'A little bit':1,'Quite a bit':2,'Very Much':3,
'Low energy':0,'Low Energy':0,'Somewhat low energy':1,'Neutral':2,'Somewhat high energy':3,'High energy':4,'High Energy':4,
'Not at all restful':0,'Slightly restful':1,'Somewhat restful':2,'Very restful':3,
'NO_ANSWER_SELECTED':-1,'NOT_PRESENTED':-1,'SKIP QUESTION':-1},inplace=True)
# Weekly Survey Data
# -------------------
print('\tProcessing weekly survey data...')
for participant in os.listdir(parent_dir):
if len(participant) == 8:
pid = participant
# less columns
participant_df = pd.DataFrame(columns=['ID','Upset','Unable','Stressed','Confident','Your_Way','Cope','Able','Top','Angered','Overcome'])
try:
for file in os.listdir(f'{parent_dir}{participant}/survey_answers/{weekly_survey_id}/'):
df = pd.read_csv(f'{parent_dir}{participant}/survey_answers/{weekly_survey_id}/{file}')
try:
participant_df.loc[datetime.strptime(file[:-4],'%Y-%m-%d %H_%M_%S') - timedelta(hours=6)] = [pid,df.loc[1,'answer'],df.loc[2,'answer'],df.loc[3,'answer'],df.loc[4,'answer'],df.loc[5,'answer'],df.loc[6,'answer'],df.loc[7,'answer'],df.loc[8,'answer'],df.loc[9,'answer'],df.loc[10,'answer']]
except KeyError:
try:
participant_df.loc[datetime.strptime(file[:-4],'%Y-%m-%d %H_%M_%S') - timedelta(hours=6)] = [pid,df.loc[0,'answer'],df.loc[1,'answer'],df.loc[2,'answer'],df.loc[3,'answer'],df.loc[4,'answer'],df.loc[5,'answer'],df.loc[6,'answer'],df.loc[7,'answer'],df.loc[8,'answer'],df.loc[9,'answer']]
except:
print(f'\t\tProblem with weekly survey {file} for Participant {pid} - Participant most likely did not answer a question')
self.move_to_purgatory(f'{parent_dir}{participant}/survey_answers/{weekly_survey_id}/{file}',f'../../data/purgatory/{self.study}-{pid}-survey-weekly-{file}')
weekly_survey_df = weekly_survey_df.append(participant_df)
except FileNotFoundError:
print(f'\t\tParticipant {pid} does not seem to have submitted any weekly surveys - check directory')
else:
print(f'\t\tDirectory {participant} is not valid')
weekly_survey_df.replace({'Not at all':0,'A little bit':1,'Quite a bit':2,'Very Much':3,
'Never':0,'Almost Never':1,'Sometimes':2,'Fairly Often':3,'Very Often':4,
'Low energy':0,'Low Energy':0,'Somewhat low energy':1,'Neutral':2,'Somewhat high energy':3,'High energy':4,'High Energy':4,
'Not at all restful':0,'Slightly restful':1,'Somewhat restful':2,'Very restful':3,
'NO_ANSWER_SELECTED':-1,'NOT_PRESENTED':-1,'SKIP QUESTION':-1},inplace=True)
# saving
try:
morning_survey_df.to_csv(f'../../data/processed/bpeace2-morning-survey.csv')
evening_survey_df.to_csv(f'../../data/processed/bpeace2-evening-survey.csv')
weekly_survey_df.to_csv(f'../../data/processed/bpeace2-weekly-survey.csv')
except:
return False
return True
def process_environment_survey(self, data_file='../../data/raw/bpeace2/surveys/EESurvey_E1_raw.csv'):
'''
Processes raw environment survey (first instance) and combines relevant data into processed directory
Returns True if processed, False otherwise
'''
print('\tProcessing first environment survey...')
ee = pd.read_csv(data_file,usecols=[0,2,4,5,6,7,8,9],parse_dates=[1])
ee.columns = ['REDCap','Timestamp','Apartment','Duplex','House','Dorm','Hotel','Other']
ee.dropna(subset=['Timestamp'],inplace=True)
ee.set_index('Timestamp',inplace=True)
# saving
try:
ee.to_csv(f'../../data/processed/bpeace2-ee-survey.csv')
except:
return False
return True
def process_fitbit(self):
'''
Processes fitbit data
Returns True if processed, False otherwise
'''
print('\tProcessing Fitbit data...')
def import_fitbit(filename, pt_dir=f"../../data/raw/bpeace2/fitbit/"):
'''
Imports the specified file for each participant in the directory
Inputs:
- filename: string corresponding to the filename to look for for each participant
Returns a dataframe with the combined data from all participants
'''
print(f"\tReading from file {filename}")
df = pd.DataFrame()
for pt in os.listdir(pt_dir):
if pt[0] != ".":
print(f"\t\tReading for participant {pt}")
try:
temp = pd.read_csv(f"{pt_dir}{pt}/fitbit_{filename}.csv", index_col=0, parse_dates=True)
if filename[:4] == "intr":
temp = process_fitbit_intraday(temp)
temp["beiwe"] = pt
df = df.append(temp)
except FileNotFoundError:
print(f"\t\tFile {filename} not found for participant {pt}")
return df
def get_device_df(info_df):
'''
Take dictionary-like entries for fitbit info dataframe for each row in a dataframe and makes a new dataframe
Inputs:
- info_df: the fitbit info dataframe with the dictionary-like entries
Returns a dataframe for the device column
'''
overall_dict = {}
for row in range(len(info_df)):
Dict = ast.literal_eval(info_df['devices'][row])
if type(Dict) == dict:
Dict = Dict
elif type(Dict) in [tuple,list] and len(Dict) > 1:
Dict = Dict[0]
else:
continue
for key in Dict.keys():
overall_dict.setdefault(key, [])
overall_dict[key].append(Dict[key])
# adding in the date of recording
overall_dict.setdefault('date', [])
overall_dict['date'].append(info_df.index[row])
df = pd.DataFrame(overall_dict)
df['date'] = pd.to_datetime(df['date'],errors='coerce')
return df.set_index('date')
def get_daily_sleep(daily_df):
'''
Creates a dataframe with the daily sleep data summarized
Inputs:
- daily_df: dataframe created from the daily fitbit csv file
Returns a dataframe of the daily sleep data
'''
overall_dict = {}
for row in range(len(daily_df)):
# in case Fitbit didn't record sleep records for that night - value is NaN
pt = daily_df['beiwe'][row]
# pts with classic sleep data
if pt in ['awa8uces','ewvz3zm1','pgvvwyvh']:
continue
if type(daily_df['sleep'][row]) == float:
continue
else:
Dict = ast.literal_eval(daily_df['sleep'][row])
if type(Dict) == dict:
Dict = Dict
else:
Dict = Dict[0]
for key in Dict.keys():
overall_dict.setdefault(key, [])
overall_dict[key].append(Dict[key])
# adding in the date of recording
overall_dict.setdefault('date', [])
overall_dict['date'].append(daily_df.index[row])
# adding beiwe id
overall_dict.setdefault('beiwe', [])
overall_dict['beiwe'].append(daily_df['beiwe'][row])
df = pd.DataFrame(overall_dict)
df['date'] = pd.to_datetime(df['date'],errors='coerce')
# removing classic sleep stage data
df = df[df['type'] != 'classic']
return df.set_index('date')
def get_sleep_stages(daily_sleep):
'''
Creates a dataframe for the minute sleep data
Input(s):
- daily_sleep: dataframe holding the daily sleep data with a column called minuteData
Returns:
- sleep_stages: a dataframe with sleep stage data for every stage transition
- summary: a dataframe with the nightly sleep stage information
'''
data_dict = {'startDate':[],'endDate':[],'dateTime':[],'level':[],'seconds':[],'beiwe':[]}
summary_dict = {'startDate':[],'endDate':[],'deep_count':[],'deep_minutes':[],'light_count':[],'light_minutes':[],
'rem_count':[],'rem_minutes':[],'wake_count':[],'wake_minutes':[],'beiwe':[]}
for row in range(len(daily_sleep)):
d0 = pd.to_datetime(daily_sleep['startTime'][row])
d1 = pd.to_datetime(daily_sleep['dateOfSleep'][row])
sleep_dict = daily_sleep['levels'][row]
for key in sleep_dict.keys():
if key == 'data': # data without short wake periods
temp_data = sleep_dict['data']
for temp_data_dict in temp_data:
for data_key in temp_data_dict.keys():
data_dict[data_key].append(temp_data_dict[data_key])
data_dict['startDate'].append(d0.date())
data_dict['endDate'].append(d1.date())
data_dict['beiwe'].append(daily_sleep['beiwe'][row])
elif key == 'summary': # nightly summary data - already in dictionary form
for summary_key in sleep_dict['summary'].keys():
stage_dict = sleep_dict['summary'][summary_key]
for stage_key in ['count','minutes']:
summary_dict[f'{summary_key}_{stage_key}'].append(stage_dict[stage_key])
summary_dict['startDate'].append(d0.date())
summary_dict['endDate'].append(d1.date())
summary_dict['beiwe'].append(daily_sleep['beiwe'][row])
else: # shortData or data with short wake periods - don't need
pass
sleep_stages = pd.DataFrame(data_dict)
sleep_stages.columns = ['start_date','end_date','time','stage','time_at_stage','beiwe'] # renaming columns
# adding column for numeric value of sleep stage
def numeric_from_str_sleep_stage(row):
if row['stage'] == 'wake':
return 0
elif row['stage'] == 'light':
return 1
elif row['stage'] == 'deep':
return 2
elif row['stage'] == 'rem':
return 3
else:
return -1
sleep_stages['value'] = sleep_stages.apply(lambda row: numeric_from_str_sleep_stage(row), axis=1)
summary = pd.DataFrame(summary_dict)
# (assumed completion of the truncated function: return both frames as described in the docstring)
return sleep_stages, summary
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2022/2/2 23:26
Desc: Eastmoney (东方财富网) - quotes homepage - Shanghai, Shenzhen and Beijing A shares
"""
import requests
import pandas as pd
def stock_zh_a_spot_em() -> pd.DataFrame:
"""
Eastmoney - Shanghai, Shenzhen and Beijing A shares - real-time quotes
http://quote.eastmoney.com/center/gridlist.html#hs_a_board
:return: real-time quotes
:rtype: pandas.DataFrame
"""
url = "http://82.push2.eastmoney.com/api/qt/clist/get"
params = {
"pn": "1",
"pz": "5000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:0 t:6,m:0 t:80,m:1 t:2,m:1 t:23,m:0 t:81 s:2048",
"fields": "f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f12,f13,f14,f15,f16,f17,f18,f20,f21,f23,f24,f25,f22,f11,f62,f128,f136,f115,f152",
"_": "1623833739532",
}
r = requests.get(url, params=params)
data_json = r.json()
if not data_json["data"]["diff"]:
return pd.DataFrame()
temp_df = pd.DataFrame(data_json["data"]["diff"])
temp_df.columns = [
"_",
"最新价",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"换手率",
"市盈率-动态",
"量比",
"_",
"代码",
"_",
"名称",
"最高",
"最低",
"今开",
"昨收",
"_",
"_",
"_",
"市净率",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
]
temp_df.reset_index(inplace=True)
temp_df["index"] = temp_df.index + 1
temp_df.rename(columns={"index": "序号"}, inplace=True)
temp_df = temp_df[
[
"序号",
"代码",
"名称",
"最新价",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"最高",
"最低",
"今开",
"昨收",
"量比",
"换手率",
"市盈率-动态",
"市净率",
]
]
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"], errors="coerce")
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"], errors="coerce")
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"], errors="coerce")
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"], errors="coerce")
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"], errors="coerce")
temp_df["振幅"] = pd.to_numeric(temp_df["振幅"], errors="coerce")
temp_df["最高"] = pd.to_numeric(temp_df["最高"], errors="coerce")
temp_df["最低"] = pd.to_numeric(temp_df["最低"], errors="coerce")
temp_df["今开"] = pd.to_numeric(temp_df["今开"], errors="coerce")
temp_df["昨收"] = pd.to_numeric(temp_df["昨收"], errors="coerce")
temp_df["量比"] = pd.to_numeric(temp_df["量比"], errors="coerce")
temp_df["换手率"] = pd.to_numeric(temp_df["换手率"], errors="coerce")
temp_df["市盈率-动态"] = pd.to_numeric(temp_df["市盈率-动态"], errors="coerce")
temp_df["市净率"] = pd.to_numeric(temp_df["市净率"], errors="coerce")
return temp_df
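# Illustrative usage sketch (requires live access to the Eastmoney endpoint above;
# the shapes and values noted here are assumptions, not captured output):
#
#   spot_df = stock_zh_a_spot_em()
#   spot_df.head()      # one row per listed A-share, with the renamed Chinese column names
#   spot_df['代码']      # bare 6-digit stock codes, e.g. "600000"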
def stock_zh_b_spot_em() -> pd.DataFrame:
"""
Eastmoney - B shares - real-time quotes
http://quote.eastmoney.com/center/gridlist.html#hs_a_board
:return: real-time quotes
:rtype: pandas.DataFrame
"""
url = "http://28.push2.eastmoney.com/api/qt/clist/get"
params = {
"pn": "1",
"pz": "5000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:0 t:7,m:1 t:3",
"fields": "f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f12,f13,f14,f15,f16,f17,f18,f20,f21,f23,f24,f25,f22,f11,f62,f128,f136,f115,f152",
"_": "1623833739532",
}
r = requests.get(url, params=params)
data_json = r.json()
if not data_json["data"]["diff"]:
return pd.DataFrame()
temp_df = pd.DataFrame(data_json["data"]["diff"])
temp_df.columns = [
"_",
"最新价",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"换手率",
"市盈率-动态",
"量比",
"_",
"代码",
"_",
"名称",
"最高",
"最低",
"今开",
"昨收",
"_",
"_",
"_",
"市净率",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
]
temp_df.reset_index(inplace=True)
temp_df["index"] = range(1, len(temp_df) + 1)
temp_df.rename(columns={"index": "序号"}, inplace=True)
temp_df = temp_df[
[
"序号",
"代码",
"名称",
"最新价",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"最高",
"最低",
"今开",
"昨收",
"量比",
"换手率",
"市盈率-动态",
"市净率",
]
]
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"], errors="coerce")
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"], errors="coerce")
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"], errors="coerce")
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"], errors="coerce")
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"], errors="coerce")
temp_df["振幅"] = pd.to_numeric(temp_df["振幅"], errors="coerce")
temp_df["最高"] = pd.to_numeric(temp_df["最高"], errors="coerce")
temp_df["最低"] = pd.to_numeric(temp_df["最低"], errors="coerce")
temp_df["今开"] = pd.to_numeric(temp_df["今开"], errors="coerce")
temp_df["昨收"] = pd.to_numeric(temp_df["昨收"], errors="coerce")
temp_df["量比"] = pd.to_numeric(temp_df["量比"], errors="coerce")
temp_df["换手率"] = pd.to_numeric(temp_df["换手率"], errors="coerce")
temp_df["市盈率-动态"] = pd.to_numeric(temp_df["市盈率-动态"], errors="coerce")
temp_df["市净率"] = pd.to_numeric(temp_df["市净率"], errors="coerce")
return temp_df
def code_id_map_em() -> dict:
"""
Eastmoney - mapping from stock code to market ID
http://quote.eastmoney.com/center/gridlist.html#hs_a_board
:return: mapping from stock code to market ID
:rtype: dict
"""
url = "http://80.push2.eastmoney.com/api/qt/clist/get"
params = {
"pn": "1",
"pz": "5000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:1 t:2,m:1 t:23",
"fields": "f12",
"_": "1623833739532",
}
r = requests.get(url, params=params)
data_json = r.json()
if not data_json["data"]["diff"]:
return dict()
temp_df = pd.DataFrame(data_json["data"]["diff"])
temp_df["market_id"] = 1
temp_df.columns = ["sh_code", "sh_id"]
code_id_dict = dict(zip(temp_df["sh_code"], temp_df["sh_id"]))
params = {
"pn": "1",
"pz": "5000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:0 t:6,m:0 t:80",
"fields": "f12",
"_": "1623833739532",
}
r = requests.get(url, params=params)
data_json = r.json()
if not data_json["data"]["diff"]:
return dict()
temp_df_sz = pd.DataFrame(data_json["data"]["diff"])
temp_df_sz["sz_id"] = 0
code_id_dict.update(dict(zip(temp_df_sz["f12"], temp_df_sz["sz_id"])))
params = {
"pn": "1",
"pz": "5000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:0 t:81 s:2048",
"fields": "f12",
"_": "1623833739532",
}
r = requests.get(url, params=params)
data_json = r.json()
if not data_json["data"]["diff"]:
return dict()
temp_df_sz = pd.DataFrame(data_json["data"]["diff"])
temp_df_sz["bj_id"] = 0
code_id_dict.update(dict(zip(temp_df_sz["f12"], temp_df_sz["bj_id"])))
return code_id_dict
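# Sketch of how the mapping is consumed downstream (the example values are assumptions):
#
#   code_id_dict = code_id_map_em()
#   # code_id_dict["600000"] -> 1   (Shanghai listing)
#   # code_id_dict["000001"] -> 0   (Shenzhen listing)
#   # stock_zh_a_hist() below builds its "secid" parameter as f"{code_id_dict[symbol]}.{symbol}"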
def stock_zh_a_hist(
symbol: str = "000001",
period: str = "daily",
start_date: str = "19700101",
end_date: str = "20500101",
adjust: str = "",
) -> pd.DataFrame:
"""
Eastmoney - quotes homepage - Shanghai, Shenzhen and Beijing A shares - daily quotes
http://quote.eastmoney.com/concept/sh603777.html?from=classic
:param symbol: stock code
:type symbol: str
:param period: choice of {'daily', 'weekly', 'monthly'}
:type period: str
:param start_date: start date
:type start_date: str
:param end_date: end date
:type end_date: str
:param adjust: choice of {"qfq": "forward-adjusted", "hfq": "back-adjusted", "": "not adjusted"}
:type adjust: str
:return: daily quotes
:rtype: pandas.DataFrame
"""
code_id_dict = code_id_map_em()
adjust_dict = {"qfq": "1", "hfq": "2", "": "0"}
period_dict = {"daily": "101", "weekly": "102", "monthly": "103"}
url = "http://push2his.eastmoney.com/api/qt/stock/kline/get"
params = {
"fields1": "f1,f2,f3,f4,f5,f6",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58,f59,f60,f61,f116",
"ut": "7eea3edcaed734bea9cbfc24409ed989",
"klt": period_dict[period],
"fqt": adjust_dict[adjust],
"secid": f"{code_id_dict[symbol]}.{symbol}",
"beg": start_date,
"end": end_date,
"_": "1623766962675",
}
r = requests.get(url, params=params)
data_json = r.json()
if not data_json["data"]["klines"]:
return pd.DataFrame()
temp_df = pd.DataFrame(
[item.split(",") for item in data_json["data"]["klines"]]
)
temp_df.columns = [
"日期",
"开盘",
"收盘",
"最高",
"最低",
"成交量",
"成交额",
"振幅",
"涨跌幅",
"涨跌额",
"换手率",
]
temp_df.index = pd.to_datetime(temp_df["日期"])
temp_df.reset_index(inplace=True, drop=True)
temp_df["开盘"] = pd.to_numeric(temp_df["开盘"])
temp_df["收盘"] = pd.to_numeric(temp_df["收盘"])
temp_df["最高"] = pd.to_numeric(temp_df["最高"])
temp_df["最低"] = pd.to_numeric(temp_df["最低"])
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"])
temp_df["成交额"] = pd.to_ | numeric(temp_df["成交额"]) | pandas.to_numeric |
"""
data_collection_10_Network_Centrality.py
This code calculates centrality metrics...
1 - load in graphs - Largest weakly Connected Components!!
2 - applying HITS algorithm
3 - re-calculating centrality within my sub-graph only
(4 - draw network graphs with hubs centrality metrics --> see next .py doc)
@author: lizakarmannaya
"""
import networkx as nx
import pandas as pd
import numpy as np
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import fnmatch
import os
import glob
from scipy.stats import skew, kurtosis, mode
#### 1 - load in graphs - Largest weakly Connected Components!! ####
os.chdir(os.path.expanduser("~"))
L = nx.read_pajek('study2_largest_wcc_LEFT_directed.net')
R = nx.read_pajek('study2_largest_wcc_RIGHT_directed.net')
#this imports them as multigraph types --> convert to DiGraph
L = nx.DiGraph(L)
R = nx.DiGraph(R)
########################################
#### 2 - applying HITS algorithm #####
########################################
#for LEFT
hits_L_hubs, hits_L_authorities = nx.hits(L)
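# nx.hits() returns two dicts keyed by node id (Twitter user-id strings here),
# e.g. hits_L_hubs['703690879'] would give that account's hub score; with the
# default normalisation the scores across the graph sum to 1.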
plt.hist(hits_L_hubs.values(), 40, log=False, facecolor='red', alpha=0.5)
plt.savefig('RESULTS/hist_LEFT_hubs_with_elites') #have to execute together with line above to avoid saving blank canvas
plt.show()
#deleting elites from graph
my_elites = pd.read_csv('my_elites.csv', index_col=0)
my_elites['twitter_name_index'] = my_elites['twitter_name']
my_elites = my_elites.set_index('twitter_name_index') #using twitter_name = screen_name as index for later
my_elites.head()
elites_ids = my_elites['user_id'] #pandas series
len(elites_ids) #420
##now delete these elites from page_rank_L - LEFT:
#need to create a list of strings first
to_delete = []
for item in elites_ids:
key = str(item)
to_delete.append(key)
len(to_delete) #420
### LEFT ####
to_delete_LEFT = set(L.nodes()).intersection(to_delete)
len(to_delete_LEFT) #29
hits_L_hubs_noelites = hits_L_hubs ## NB this currently doesn't help distinguish them
for item in to_delete_LEFT:
del hits_L_hubs_noelites[item]
len(hits_L_hubs_noelites) #822752 - without elites
L.number_of_nodes() #822781 - with elites
##NB re-run these 3 sections below
plt.hist(hits_L_hubs_noelites.values(), 40, log=False, facecolor='red', alpha=0.5)
plt.savefig('RESULTS/hist_LEFT_hubs_noelites') #have to execute together with line above to avoid saving blank canvas
plt.hist(hits_L_hubs_noelites.values(), 40, log=True, facecolor='red', alpha=0.5)
plt.savefig('RESULTS/hist_LEFT_hubs_noelites_logscale') #have to execute together with line above to avoid saving blank canvas
LEFT_hubs = pd.DataFrame.from_dict(data=hits_L_hubs_noelites, orient='index', columns=['hubs'])
LEFT_hubs.to_csv('hubs_scores/LEFT_hubs_noelites.csv')
#repeat for RIGHT
hits_R_hubs, hits_R_authorities = nx.hits(R)
#example hits_L_authorities['703690879'] #0
plt.hist(hits_R_hubs.values(), 40, log=False, facecolor='blue', alpha=0.5)
plt.savefig('RESULTS/hist_RIGHT_hubs_with_elites') #have to execute together with line above to avoid saving blank canvas
#deleting elites from graph
to_delete_RIGHT = set(R.nodes()).intersection(to_delete)
len(to_delete_RIGHT) #35
hits_R_hubs_noelites = hits_R_hubs ### NB this currently doesn't help distinguish them - pointless
for item in to_delete_RIGHT:
del hits_R_hubs_noelites[item]
len(hits_R_hubs_noelites) #1542221 - without elites
#len(hits_R_hubs) #1542221 - original dict is also modified
R.number_of_nodes() #1542256 - with elites
#NB re-run these 3 sections below
plt.hist(hits_R_hubs_noelites.values(), 40, log=False, facecolor='blue', alpha=0.5)
plt.savefig('RESULTS/hist_RIGHT_hubs_noelites') #have to execute together with line above to avoid saving blank canvas
plt.hist(hits_R_hubs_noelites.values(), 40, log=True, facecolor='blue', alpha=0.5)
plt.savefig('RESULTS/hist_RIGHT_hubs_noelites_logscale') #have to execute together with line above to avoid saving blank canvas
RIGHT_hubs = pd.DataFrame.from_dict(data=hits_R_hubs_noelites, orient='index', columns=['hubs'])
RIGHT_hubs.to_csv('hubs_scores/RIGHT_hubs_noelites.csv')
RIGHT_hubs
#### calculating skew and kurtosis for entire sample's hubs centrality
## NB re-run these?
L_hubs = list(hits_L_hubs.values()) #currently this is without the elites, as they were taken out above
len(L_hubs) #822752
skew(L_hubs) #-0.1830900326354742
kurtosis(L_hubs) #-1.8363738717470777
np.mean(L_hubs)
mode(L_hubs)
np.median(L_hubs)
np.std(L_hubs)
R_hubs = list(hits_R_hubs.values()) #currently this is without the elites, as they were taken out above
len(R_hubs) #1542221
skew(R_hubs) #-0.6376712808927192
kurtosis(R_hubs) #-1.16105655692604
np.mean(R_hubs)
mode(R_hubs)
np.median(R_hubs)
np.std(R_hubs)
entire_hubs = L_hubs+R_hubs
len(entire_hubs) #2,364,973
skew(entire_hubs) #0.7903545150997883
kurtosis(entire_hubs) #-0.3640943243229504
np.mean(entire_hubs)
mode(entire_hubs)
np.median(entire_hubs)
np.std(entire_hubs)
#### save hubs & authorities values into results df ####
df = pd.read_csv('RESULTS_df_multiverse_4.csv', index_col=0)
df = df.reset_index()
df['authorities']='NaN'
df['hubs'] = 'NaN'
df.head()
errors3 = []
for index in df.index:
user_id = df['user_id_str'].values[index] #NB this is a numpy integer
if df['side'].values[index] == 'LEFT':
try:
df['authorities'].values[index] = hits_L_authorities[str(user_id)]
df['hubs'].values[index] = hits_L_hubs[str(user_id)]
except KeyError as e:
errors3.append(e)
print(e)
df['authorities'].values[index] = 'NaN'
df['hubs'].values[index]='NaN'
elif df['side'].values[index] == 'RIGHT':
try:
df['authorities'].values[index] = hits_R_authorities[str(user_id)]
df['hubs'].values[index] = hits_R_hubs[str(user_id)]
except KeyError as e:
errors3.append(user_id)
print(e)
df['authorities'].values[index] = 'NaN'
df['hubs'].values[index] = 'NaN'
else:
print('error')
len(errors3) #326
df.head()
df.shape #(34284, 20)
df['authorities']
df['hubs']
# create user_id_str_index from user_id_str
# df = df.set_index('user_id_str_index') #using twitter_name = screen_name as index for later
df.to_csv('RESULTS_df_multiverse_6.csv')
#plt.hist(df['authorities'], log=False, facecolor='purple', alpha=0.5)
#plt.hist(df['hubs'], log=False, facecolor='purple', alpha=0.5)
#############################################################################
########### 3 - re-calculating centrality within my sub-graph only ##########
#############################################################################
#re-load in elites
my_elites = pd.read_csv('my_elites.csv', index_col=0)
#my_elites['twitter_name_index'] = my_elites['twitter_name']
#my_elites = my_elites.set_index('twitter_name_index') #using twitter_name = screen_name as index for later
#my_elites.head()
elites_ids = [str(item) for item in my_elites['user_id']]
len(elites_ids) #420
#re-loaded L and R graphs as at the start of this document
#split elites by graph
elites_LEFT = set(L.nodes()).intersection(elites_ids)
len(elites_LEFT) #29
elites_RIGHT = set(R.nodes()).intersection(elites_ids)
len(elites_RIGHT) #35
df = pd.read_csv('RESULTS_df_multiverse_2.csv', index_col=0)
df.head()
df_LEFT = df[df['side']=='LEFT']
df_LEFT.shape #(17788, 13)
user_ids_LEFT = [str(item) for item in df_LEFT['user_id_str']]
df_RIGHT = df[df['side']=='RIGHT']
df_RIGHT.shape #(16496, 13)
user_ids_RIGHT = [str(item) for item in df_RIGHT['user_id_str']]
L_small_nodes = list(elites_LEFT) + user_ids_LEFT
len(L_small_nodes) #17817
L_small = L.subgraph(L_small_nodes)
L_small.number_of_nodes() #17817
L_small.number_of_edges() #26796
R_small_nodes = list(elites_RIGHT) + user_ids_RIGHT
len(R_small_nodes) #16531
R_small = R.subgraph(R_small_nodes)
R_small.number_of_nodes() #16205 - ????
R_small.number_of_edges() #21188
#save these new small graphs
nx.write_gexf(L_small, 'L_small.gexf')
nx.write_gexf(R_small, 'R_small.gexf')
#save as Pajek graphs
nx.write_pajek(L_small, 'L_small.net')
nx.write_pajek(R_small, 'R_small.net')
#calculate hubs and authorities scores
hits_L_small_hubs, hits_L_small_authorities = nx.hits(L_small)
hits_R_small_hubs, hits_R_small_authorities = nx.hits(R_small)
#plot histograms for new hits scores
plt.hist(hits_L_small_hubs.values(), 40, log=False, facecolor='red', alpha=0.5)
plt.savefig('RESULTS/hist_LEFT_small_hubs_with_elites') #have to execute together with line above to avoid saving blank canvas
hits_L_small_hubs_noelites = hits_L_small_hubs ### NB this currently modifies the original list
for item in elites_LEFT:
del hits_L_small_hubs_noelites[item]
len(hits_L_small_hubs_noelites) #17788
len(hits_L_small_hubs) #17788 - NB the original list is also modified!!!
plt.hist(hits_L_small_hubs_noelites.values(), 40, log=False, facecolor='red', alpha=0.5)
plt.savefig('RESULTS/hist_LEFT_small_hubs_noelites') #have to execute together with line above to avoid saving blank canvas
##repeat for RIGHT
#plot histograms for new hits scores
plt.hist(hits_R_small_hubs.values(), 40, log=False, facecolor='blue', alpha=0.5)
plt.savefig('RESULTS/hist_RIGHT_small_hubs_with_elites') #have to execute together with line above to avoid saving blank canvas
hits_R_small_hubs_noelites = hits_R_small_hubs ### NB this currently doesn't help distinguish them - pointless
for item in elites_RIGHT:
del hits_R_small_hubs_noelites[item]
len(hits_R_small_hubs_noelites) #16170
len(hits_R_small_hubs) #16170 - NB modified the original dictionary!!
plt.hist(hits_R_small_hubs_noelites.values(), 40, log=False, facecolor='blue', alpha=0.5)
plt.savefig('RESULTS/hist_RIGHT_small_hubs_noelites') #have to execute together with line above to avoid saving blank canvas
##save these new hubs scores into RESULTS_df_multiverse_4.csv
df = pd.read_csv('RESULTS_df_multiverse_4.csv', index_col=0)
# import
import pandas as pd
import sys
import glob
import dask.dataframe as dd
import matplotlib.pyplot as plt
from utils import get_spain_places
import re
# args
raw_tweet_dir = sys.argv[1] # data path
scope = sys.argv[2] # SPA
# read files
# tweets
all_files = glob.glob(raw_tweet_dir + "/ours_*.csv")
raw_tweet_files = dd.read_csv(all_files,usecols=['tweet_id','date','user_id'],dtype={'user_id': 'str'})
raw_tweet_files = raw_tweet_files.compute()
# lang
all_files = glob.glob(raw_tweet_dir + "/tweets_*.csv")
raw_lang_files = dd.read_csv(all_files,usecols=['tweet_id','lang'])
raw_lang_files = raw_lang_files.compute()
# merge files
data = pd.merge(raw_tweet_files,raw_lang_files, on='tweet_id')
# SPA
if scope=='SPA':
# tweets users
all_files = glob.glob(raw_tweet_dir + '/users_loc*.csv')
raw_user_file = dd.read_csv(all_files,usecols=['id_str','location'],dtype={'id_str': 'str'})
raw_user_file = raw_user_file.compute()
raw_user_file.rename({'id_str': 'user_id'}, axis=1, inplace=True)
#
data = data.merge(raw_user_file, on='user_id')
# exclude latam but check spain places > 50k
locations2check = "|".join(get_spain_places(raw_tweet_dir+"/places_spain.csv")) # get lists of spain places > 50k
data = data[data.apply(lambda x: len([s for s in str(x['location']).split() if re.compile(locations2check).match(s.lower())]) > 0, axis=1)]
lang = 'es'
counting = set(data.loc[data['lang'].str.contains(lang)]['tweet_id'])
print("SPA excluding locations tweets, es",len(counting))
# get stats
data['date'] = pd.to_datetime(data['date'])
# import python libraries
from pickle import TRUE
import pandas as pd
from pandas._libs.missing import NA
from pandas.api.types import is_numeric_dtype
import streamlit as st
import copy
import matplotlib.pyplot as plt
import os
# import user-defined parameter selectors
from models.Regression import regression_param_selector
from models.SVR import svr_param_selector
from models.SVC import svc_param_selector
from models.kerasNN import knn_param_selector
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder, LabelEncoder
# import user-defined functions
from models.utils import model_imports
from utils.functions import img_to_bytes
@st.experimental_memo
def read_csv(path):
return pd.read_csv(path)
def introduction():
st.title("Machine Learning Models for HTP phenotype prediction")
st.subheader("Predict phenotypes from indices and assess the accuracy of predictions")
st.image('./images/field.tif')
st.markdown(
"""
- 🗂️ Choose/upload a dataset
- ⚙️ Pick a phenotype to predict, predictors, model and set its hyper-parameters
- 📉 Train it and check its performance metrics on train and test data
- 🩺 Diagnose possible overfitting and experiment with other settings
-----
"""
)
def dataset_selector():
global current_data
current_data = None
uploaded_file = st.file_uploader("Upload CSV file", key='data_uploader')
if uploaded_file is not None:
current_data = read_csv(uploaded_file)
dataset = "upload"
else:
# st.write("#### Or, choose a pre-loaded dataset")
# dataset = st.selectbox("Choose a dataset", options=("IRRI (2016)","iris")) #########################################
st.write("#### Or, explore with a pre-loaded dataset")
dataset = "IRRI (2016)"
if dataset == "IRRI (2016)":
current_data = read_csv('data/2016DS_merged.csv')
elif dataset == 'iris':
current_data = read_csv('data/iris.csv')
return current_data
def model_selector(goal):
models = {'Regression':['Linear Regression', 'Keras Neural Network', 'SVR'], 'Classification':['SVC', 'Keras Neural Network']}
model_type = st.selectbox("Models", models[goal])
return model_type
def parameter_selector(model_type, goal, nclasses, input_shape=None):
epochs = None
validation_split = None
batch_size = None
if model_type == "Linear Regression":
model, json_param = regression_param_selector()
elif model_type == "Keras Neural Network":
validation_split, epochs, batch_size, model, json_param = knn_param_selector(goal, nclasses, input_shape=input_shape)
elif model_type == "SVR":
model, json_param = svr_param_selector()
elif model_type == 'SVC':
model, json_param = svc_param_selector()
return validation_split, epochs, batch_size, model, json_param
def footer():
st.sidebar.markdown("---")
st.sidebar.markdown(
"""
[<img src='data:image/png;base64,{}' class='img-fluid' width=25 height=25>](https://github.com/songwan/HTPPs) <small> Based on Playground 0.1.0 </small>""".format(
img_to_bytes("./images/github.png")
),
unsafe_allow_html=True,
)
def is_categorical(df, colname):
# if the column is string, then assume it to be categorical
if df[colname].dtype=='object':
return True
# else assume it numerical
else:
return False
def labelencoder(y, yy):
label_encoder = LabelEncoder()
cat_y = label_encoder.fit_transform(y)
cat_y = pd.Series(cat_y)
labely = pd.DataFrame(label_encoder.classes_).transpose()
return cat_y, labely
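# Illustrative example (hypothetical labels; note the second argument `yy` is not
# used by the function):
#
#   cat_y, labely = labelencoder(pd.Series(['MC', 'TC', 'MC']), None)
#   # cat_y  -> 0, 1, 0
#   # labely -> single-row frame holding the original class names ['MC', 'TC']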
def onehot_encoder(df):
# One-hot encoding if there is a charictar variable in X
ohe_info = None
ohe_info_concat = pd.DataFrame()
for var in df:
if is_categorical(df, var):
enc = OneHotEncoder(handle_unknown='ignore')
var_ohe = pd.DataFrame(enc.fit_transform(pd.DataFrame(df[var])).toarray())
var_ohe.index = df.index
colname_ohe = enc.get_feature_names_out([var])
var_ohe.columns = colname_ohe
ohe_info = var_ohe.drop_duplicates(ignore_index=True)
ohe_info_concat = pd.concat([ohe_info_concat, ohe_info], axis=1)
df = pd.merge(df, var_ohe, left_index=True, right_index=True, how='left')
#!/usr/bin/env python
# coding: utf-8
import pandas as pd
import numpy as np
from itertools import islice
import os
import pysam
import gzip
import pdb
import time
import argparse
import warnings
import sys
from tqdm.auto import tqdm
def genotype_finder(path_vcf, path_lookup_table,max_var,output_path,vcf_geno):
#looking for genotypes from vcf file
def genotypeOfHaplotypes(genotype_info):
# set 22 for nonphased genotypes
if not ("|" in genotype_info):
return "22"
genotype_info_Split = genotype_info.split("|")
genotype_hap1 = 0 if genotype_info_Split[0] == "0" else 1
genotype_hap2 = 0 if genotype_info_Split[1] == "0" else 1
return (str(genotype_hap1) + str(genotype_hap2))
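# Examples of the phased-genotype encoding above:
#   genotypeOfHaplotypes("0|1") -> "01"
#   genotypeOfHaplotypes("1|1") -> "11"
#   genotypeOfHaplotypes("0/1") -> "22"  (unphased genotypes are flagged as 22)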
def sample_column_map(path, start_col=9, line_key="#CHR"):
stream_in = gzip.open(path, "r")
out_map = {}
for line in stream_in:
if isinstance(line, bytes) and not isinstance(line, str):
line = line.decode()
if line_key in line:
line = line.rstrip().split("\t")
for i in range(start_col,len(line)):
out_map[line[i]] = i
break
stream_in.close()
return(out_map)
vcf_map = sample_column_map(os.path.expanduser(path_vcf))
tabix_vcf = pysam.Tabixfile(os.path.expanduser(path_vcf),"r")
for var_count in range(1,max_var+1):
lookup_table= pd.read_table(path_lookup_table+'haplotype_logExpression_var_'+str(var_count)+'_sort.txt', sep=" ")
lookup_table = lookup_table.reset_index(drop= True)
prediction_matrix_haplotype1= pd.DataFrame();
prediction_matrix_haplotype2= pd.DataFrame();
hap1_individual_genotype_lst = [];
hap2_individual_genotype_lst = [];
temp = pd.DataFrame()
import pandas as pd
import json
from web_constants import *
from helpers import pd_fetch_tsv, path_or_none
from oncotree import *
""" Load the metadata file to be able to create ProjectData objects """
meta_df = pd.read_csv(META_DATA_FILE, sep='\t', index_col=0)
sigs_mapping_df = pd.read_csv(PROJ_TO_SIGS_FILE, sep='\t')
samples_agg_df = pd.read_csv(SAMPLES_AGG_FILE, sep='\t', index_col=0)
""" Load the Oncotree """
with open(ONCOTREE_FILE) as f:
tree_json = json.load(f)
tree = OncoTree(tree_json)
def get_prepend_proj_id_to_sample_id_func(proj_id, proj_source):
def prepend_proj_id_to_sample_id(sample_id):
if proj_source == "TCGA":
# special case for TCGA, trim ends of sample IDs
# for convenience but also to match PanCanAtlas samples to cBioPortal samples
sample_id = sample_id[:15]
return ("%s %s" % (proj_id, sample_id))
return prepend_proj_id_to_sample_id
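# Illustrative behaviour (made-up IDs): the returned closure namespaces sample IDs by project,
#   get_prepend_proj_id_to_sample_id_func("PROJ-A", "ICGC")("SA1234") -> "PROJ-A SA1234"
#   get_prepend_proj_id_to_sample_id_func("PROJ-B", "TCGA")("TCGA-XX-1234-01A-11D") -> "PROJ-B TCGA-XX-1234-01"
# (TCGA barcodes are trimmed to 15 characters first, as noted in the comment above)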
# Factory-type function for getting single ProjectData object
def get_project_data(proj_id):
return ProjectData(proj_id, meta_df.loc[proj_id])
def get_selected_project_data(proj_id_list):
return list(map(lambda proj_id: get_project_data(proj_id), proj_id_list))
# Factory-type function for getting list of all ProjectData objects
def get_all_project_data():
row_tuples = meta_df.to_dict(orient='index').items()
return list(map(lambda row: ProjectData(row[0], row[1]), row_tuples))
# Factory-type function for getting 'serialized' list of all ProjectData objects
def get_all_project_data_as_json():
def project_data_to_json(obj):
# Even though this says as_json it is really a list of python objects
oncotree_code = obj.get_oncotree_code()
oncotree_name = obj.get_oncotree_name()
oncotree_tissue_code = obj.get_oncotree_tissue_code()
return {
"id": obj.get_proj_id(),
"name": obj.get_proj_name(),
"num_samples": obj.get_proj_num_samples(),
"source": obj.get_proj_source(),
"has_clinical": obj.has_clinical_df(),
"has_gene_mut": obj.has_gene_mut_df(),
"has_gene_exp": obj.has_gene_exp_df(),
"has_gene_cna": obj.has_gene_cna_df(),
"sigs_mapping": obj.get_sigs_mapping(),
"oncotree_code": (oncotree_code if oncotree_code is not None else "nan"),
"oncotree_name": (oncotree_name if oncotree_name is not None else "nan"),
"oncotree_tissue_code": (oncotree_tissue_code if oncotree_tissue_code is not None else "nan")
}
return list(map(project_data_to_json, get_all_project_data()))
def get_all_tissue_types_as_json():
return [{'oncotree_name':node.name, 'oncotree_code':node.code} for node in tree.get_tissue_nodes()]
"""
Class representing a single row of the META_DATA_FILE,
also how the files referenced within the meta file should be loaded into data frames
"""
class ProjectData():
def __init__(self, proj_id, proj_row):
self.proj_id = proj_id
self.proj_name = proj_row[META_COL_PROJ_NAME]
self.oncotree_code = proj_row[META_COL_ONCOTREE_CODE] if pd.notnull(proj_row[META_COL_ONCOTREE_CODE]) else None
self.oncotree_node = tree.find_node(self.oncotree_code) if pd.notnull(proj_row[META_COL_ONCOTREE_CODE]) else None
self.proj_source = proj_row[META_COL_PROJ_SOURCE]
self.seq_type = proj_row[SEQ_TYPE]
self.counts_paths = {}
# Check for a clinical file
self.clinical_path = path_or_none(proj_row, META_COL_PATH_CLINICAL)
# Check for a samples file
self.samples_path = path_or_none(proj_row, META_COL_PATH_SAMPLES)
# Check for a gene mutation file
self.gene_mut_path = path_or_none(proj_row, META_COL_PATH_GENE_MUT)
self.gene_exp_path = path_or_none(proj_row, META_COL_PATH_GENE_EXP)
self.gene_cna_path = path_or_none(proj_row, META_COL_PATH_GENE_CNA)
for mut_type in MUT_TYPES:
cat_type = MUT_TYPE_MAP[mut_type]
# Check for a counts file for the category type
self.counts_paths[mut_type] = path_or_none(proj_row, META_COL_PATH_MUTS_COUNTS.format(cat_type=cat_type))
# Basic getters
def get_proj_id(self):
return self.proj_id
def get_proj_name(self):
return self.proj_name
def get_oncotree_code(self):
return self.oncotree_code
def get_oncotree_name(self):
if self.oncotree_node is not None:
return self.oncotree_node.name
return None
def get_oncotree_tissue_code(self):
if self.oncotree_node is not None:
return self.oncotree_node.get_tissue_node().code
return None
def get_proj_num_samples(self):
try:
return int(samples_agg_df.loc[self.get_proj_id()]["count"])
except:
return 0
def get_proj_source(self):
return self.proj_source
def get_seq_type(self):
return self.seq_type
# Samples file
def has_samples_df(self):
return (self.samples_path != None)
def get_samples_df(self):
if self.has_samples_df():
samples_df = pd_fetch_tsv(OBJ_DIR, self.samples_path)
samples_df[SAMPLE] = samples_df[SAMPLE].apply(get_prepend_proj_id_to_sample_id_func(self.get_proj_id(), self.get_proj_source()))
samples_df[PATIENT] = samples_df[PATIENT].apply(get_prepend_proj_id_to_sample_id_func(self.get_proj_id(), self.get_proj_source()))
samples_df = samples_df.set_index(SAMPLE, drop=True)
return samples_df
return None
def get_samples_list(self):
counts_df = pd.DataFrame(index=[], data=[])
for mut_type in MUT_TYPES:
if self.has_counts_df(mut_type):
cat_type_counts_df = self.get_counts_df(mut_type)
counts_df = counts_df.join(cat_type_counts_df, how='outer')
counts_df = counts_df.fillna(value=0)
counts_df = counts_df.loc[~(counts_df==0).all(axis=1)]
return list(counts_df.index.values)
def get_counts_sum_series(self):
counts_df = pd.DataFrame(index=[], data=[])
"""
Test the imbalanced_analysis module.
"""
import pytest
import pandas as pd
from sklearn.base import clone
from sklearn.model_selection import ParameterGrid
from sklearn.datasets import make_classification
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import GradientBoostingClassifier
from imblearn.over_sampling import RandomOverSampler, SMOTE, BorderlineSMOTE
from rlearn.tools.experiment import (
select_results,
combine_results,
ImbalancedExperiment,
GROUP_KEYS,
)
RND_SEED = 23
X1, y1 = make_classification(random_state=RND_SEED, n_features=10, n_samples=50)
X2, y2 = make_classification(random_state=RND_SEED + 2, n_features=20, n_samples=50)
X3, y3 = make_classification(random_state=RND_SEED + 5, n_features=5, n_samples=50)
EXPERIMENT = ImbalancedExperiment(
oversamplers=[
('random', RandomOverSampler(), {}),
('smote', SMOTE(), {'k_neighbors': [2, 3, 4]}),
],
classifiers=[
('dtc', DecisionTreeClassifier(), {'max_depth': [3, 5]}),
('knc', KNeighborsClassifier(), {}),
],
random_state=RND_SEED,
)
DATASETS = [('A', (X1, y1)), ('B', (X2, y2)), ('C', (X3, y3))]
def test_select_results_raise_error():
"""Test raising of error on selection of results."""
imbalanced_results = clone(EXPERIMENT).fit(DATASETS).results_
with pytest.raises(ValueError):
select_results(imbalanced_results, oversamplers_names=['random', 'bsmote'])
with pytest.raises(ValueError):
select_results(imbalanced_results, classifiers_names=['kn'])
with pytest.raises(ValueError):
select_results(imbalanced_results, datasets_names=['D', 'A'])
with pytest.raises(ValueError):
select_results(imbalanced_results, datasets_names=['f1'])
@pytest.mark.parametrize(
'oversamplers_names, classifiers_names, datasets_names, scoring_cols',
[(None, None, None, None), (['random'], ['knc'], ['A', 'C'], ['f1'])],
)
def test_select_results(
oversamplers_names, classifiers_names, datasets_names, scoring_cols
):
"""Test selection of results."""
experiment = clone(EXPERIMENT).set_params(scoring=['f1', 'accuracy']).fit(DATASETS)
selected_results = select_results(
experiment.results_,
oversamplers_names,
classifiers_names,
datasets_names,
scoring_cols,
)
results = selected_results.reset_index()
if oversamplers_names is not None:
assert set(results.Oversampler) == set(oversamplers_names)
else:
assert set(results.Oversampler) == set(experiment.oversamplers_names_)
if classifiers_names is not None:
assert set(results.Classifier) == set(classifiers_names)
else:
assert set(results.Classifier) == set(experiment.classifiers_names_)
if datasets_names is not None:
assert set(results.Dataset) == set(datasets_names)
else:
assert set(results.Dataset) == set(experiment.datasets_names_)
unique_scoring_cols = set([scorer[0] for scorer in selected_results.columns])
if scoring_cols is not None:
assert unique_scoring_cols == set(scoring_cols)
else:
assert unique_scoring_cols == set(experiment.scoring_cols_)
def test_combine_results_datasets():
"""Test the combination of experimental results for different datasets."""
# Clone and fit experiments
experiment1 = clone(EXPERIMENT).fit(DATASETS[:-1])
experiment2 = clone(EXPERIMENT).fit(DATASETS[-1:])
# Extract combined results
combined_results = combine_results(experiment1.results_, experiment2.results_)
results = combined_results.reset_index()
# Assertions
assert set(results.Dataset) == {'A', 'B', 'C'}
assert set(results.Oversampler) == {'random', 'smote'}
assert set(results.Classifier) == {'dtc', 'knc'}
assert set([scorer[0] for scorer in combined_results.columns]) == set(['accuracy'])
pd.testing.assert_frame_equal(
combined_results,
pd.concat([experiment1.results_, experiment2.results_]).sort_index(),
)
def test_combine_results_ovrs():
"""Test the combination of experimental results for different oversamplers."""
# Clone and fit experiments
experiment1 = (
clone(EXPERIMENT)
.set_params(
oversamplers=[('bsmote', BorderlineSMOTE(), {'k_neighbors': [2, 5]})]
)
.fit(DATASETS)
)
experiment2 = clone(EXPERIMENT).fit(DATASETS)
# Extract combined results
combined_results = combine_results(experiment1.results_, experiment2.results_)
results = combined_results.reset_index()
# Assertions
assert set(results.Dataset) == {'A', 'B', 'C'}
assert set(results.Oversampler) == {'random', 'smote', 'bsmote'}
assert set(results.Classifier) == {'dtc', 'knc'}
assert set([scorer[0] for scorer in combined_results.columns]) == set(['accuracy'])
pd.testing.assert_frame_equal(
combined_results,
pd.concat([experiment1.results_, experiment2.results_]).sort_index(),
)
# -*- coding: utf-8 -*-
"""
Created 23 April 2019
mean_traces.py
Version 1
The purpose of this script is to pull all of the mean trace files that were
saved from the initial analysis. These traces are mean subtracted and filtered
and comprise the entire 6 s of recording. The idea here is to open the files
individually, extract the data, save it to a dataframe and compile all of the
files of the same genotype into a dataframe. Then take the mean. Then plot the
means vs. all traces for both OMP and Gg8.
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
import platform
''' ################## Define file structure on server #################### '''
# home_dir will depend on the OS, but the rest will not
# query machine identity and set home_dir from there
machine = platform.uname()[0]
if machine == 'Darwin':
home_dir = '/Volumes/Urban'
elif machine == 'Linux':
home_dir = '/run/user/1000/gvfs/smb-share:server=192.168.3.11,share=urban'
elif machine == 'Windows':
home_dir = os.path.join('N:', os.sep, 'urban')
else:
print("OS not recognized. \nPlease see Nate for correction.")
project_dir = os.path.join(home_dir, 'Huang', 'OSN_OMPvGg8_MTC')
figure_dir = os.path.join(project_dir, 'figures')
table_dir = os.path.join(project_dir, 'tables')
data_dir = os.path.join(project_dir, 'data')
''' ##########################################################################
This is all the analysis, figures, saving
Read in file metadata, open file from igor, convert to pandas
##############################################################################
'''
# grab all files in table_dir
file_list = os.listdir(table_dir)
trace_files = []
cell_ids = []
for file in file_list:
if 'timeseries' in file:
trace_files.append(file)
cell_id = file.split('_')[1] + '_' + file.split('_')[2]
cell_ids.append(cell_id)
else:
continue
traces_df = pd.DataFrame({'file name': trace_files, 'cell id': cell_ids})
# grab data_notes to select out by cell type
analyzed_data_notes = pd.read_csv(os.path.join(table_dir, 'analyzed_data_notes.csv'), index_col=0)
mc_df = analyzed_data_notes[analyzed_data_notes['Cell type'] == 'MC']
# pull out gg8 cells
mc_gg8_df = mc_df[mc_df['Genotype'] == 'Gg8']
mc_gg8_list = mc_gg8_df['Cell name'].to_list()
mc_gg8_list = [name.split('_')[0] + '_' + name.split('_')[1] for name in mc_gg8_list]
mc_gg8_df = pd.DataFrame(mc_gg8_list, columns=['cell id'])
# pull out omp cells
mc_omp_df = mc_df[mc_df['Genotype'] == 'OMP']
mc_omp_list = mc_omp_df['Cell name'].to_list()
mc_omp_list = [name.split('_')[0] + '_' + name.split('_')[1] for name in mc_omp_list]
mc_omp_df = pd.DataFrame(mc_omp_list, columns=['cell id'])
# make list of Gg8 MCs
gg8_mcs = pd.merge(traces_df, mc_gg8_df)
gg8_mc_list = gg8_mcs['file name'].to_list()
# make list of OMP MCs
omp_mcs = pd.merge(traces_df, mc_omp_df)
omp_mc_list = omp_mcs['file name'].to_list()
# create empty dataframes for gg8 and omp cells
gg8_cells = pd.DataFrame()
omp_cells = pd.DataFrame()
# loop through all files, extract data and add to appropriate dataframes
for file in gg8_mc_list:
# open file and extract data into a new dataframe
mean_trace = pd.read_csv(os.path.join(table_dir, file), header=None)
gg8_cells = pd.concat([gg8_cells, mean_trace], axis=1, ignore_index=True)
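# (assumed continuation: the source is truncated here; the OMP loop would mirror
#  the Gg8 loop above before the group means are taken and plotted)
#   for file in omp_mc_list:
#       mean_trace = pd.read_csv(os.path.join(table_dir, file), header=None)
#       omp_cells = pd.concat([omp_cells, mean_trace], axis=1, ignore_index=True)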
"""
Functions to clean up neighborhood data
and feed into interactive charts
"""
import numpy as np
import pandas as pd
from datetime import date, timedelta
S3_FILE_PATH = "s3://public-health-dashboard/jhu_covid19/"
NEIGHBORHOOD_URL = f"{S3_FILE_PATH}la-county-neighborhood-time-series.parquet"
CROSSWALK_URL = f"{S3_FILE_PATH}la_neighborhoods_population_crosswalk.parquet"
NEIGHBORHOOD_APPENDED_URL = f"{S3_FILE_PATH}la-county-neighborhood-testing-appended.parquet"
def clean_data():
df = pd.read_parquet(NEIGHBORHOOD_URL)
import pandas as pd
import sys
### COMMON TO HYPERGRAPH AND SIMPLE GRAPH
def makeDefs(DEFS, opts={}):
defs = {key: opts[key] if key in opts else DEFS[key] for key in DEFS}
base_skip = opts['SKIP'] if 'SKIP' in opts else defs['SKIP']
skip = [x for x in base_skip] #copy
defs['SKIP'] = skip
for key in DEFS:
if not defs[key] in skip:
skip.append(defs[key])
return defs
def screen_entities(events, entity_types, defs):
base = entity_types if not entity_types == None else events.columns
return [x for x in base if not x in defs['SKIP']]
def col2cat(cat_lookup, col):
return cat_lookup[col] if col in cat_lookup else col
def make_reverse_lookup(categories):
lookup = {}
for category in categories:
for col in categories[category]:
lookup[col] = category
return lookup
def valToSafeStr (v):
if sys.version_info < (3,0):
t = type(v)
if t is unicode: # noqa: F821
return v
elif t is str:
return v
else:
return repr(v)
else:
t = type(v)
if t is str:
return v
else:
return repr(v)
#ex output: pd.DataFrame([{'val::state': 'CA', 'nodeType': 'state', 'nodeID': 'state::CA'}])
def format_entities(events, entity_types, defs, drop_na):
cat_lookup = make_reverse_lookup(defs['CATEGORIES'])
lst = sum([[{
col: v,
defs['TITLE']: v,
defs['NODETYPE']: col,
defs['NODEID']: col2cat(cat_lookup, col) + defs['DELIM'] + valToSafeStr(v)
}
for v in events[col].unique() if not drop_na or valToSafeStr(v) != 'nan'] for col in entity_types], [])
df = pd.DataFrame(lst)
df[defs['CATEGORY']] = df[defs['NODETYPE']].apply(lambda col: col2cat(cat_lookup, col))
return df
DEFS_HYPER = {
'TITLE': 'nodeTitle',
'DELIM': '::',
'NODEID': 'nodeID',
'ATTRIBID': 'attribID',
'EVENTID': 'EventID',
'CATEGORY': 'category',
'NODETYPE': 'type',
'EDGETYPE': 'edgeType',
'SKIP': [],
'CATEGORIES': {} # { 'categoryName': ['colName', ...], ... }
}
#ex output: pd.DataFrame([{'edgeType': 'state', 'attribID': 'state::CA', 'eventID': 'eventID::0'}])
def format_hyperedges(events, entity_types, defs, drop_na, drop_edge_attrs):
is_using_categories = len(defs['CATEGORIES'].keys()) > 0
cat_lookup = make_reverse_lookup(defs['CATEGORIES'])
subframes = []
for col in sorted(entity_types):
fields = list(set([defs['EVENTID']] + ([x for x in events.columns] if not drop_edge_attrs else [col])))
raw = events[ fields ]
if drop_na:
raw = raw.dropna()
raw = raw.copy()
if len(raw):
if is_using_categories:
raw[defs['EDGETYPE']] = raw.apply(lambda r: col2cat(cat_lookup, col), axis=1)
raw[defs['CATEGORY']] = raw.apply(lambda r: col, axis=1)
else:
raw[defs['EDGETYPE']] = raw.apply(lambda r: col, axis=1)
raw[defs['ATTRIBID']] = raw.apply(lambda r: col2cat(cat_lookup, col) + defs['DELIM'] + valToSafeStr(r[col]), axis=1)
if drop_edge_attrs:
raw = raw.drop([col], axis=1)
subframes.append(raw)
if len(subframes):
result_cols = list(set(
([x for x in events.columns.tolist() if not x == defs['NODETYPE']]
if not drop_edge_attrs
else [])
+ [defs['EDGETYPE'], defs['ATTRIBID'], defs['EVENTID']]
+ ([defs['CATEGORY']] if is_using_categories else []) ))
out = pd.concat(subframes, ignore_index=True).reset_index(drop=True)[ result_cols ]
return out
else:
return pd.DataFrame([])
# Library
import pandas as pd
import numpy as np
import datetime as dt
import time,datetime
import math
from math import sin, asin, cos, radians, fabs, sqrt
from geopy.distance import geodesic
from numpy import NaN
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import KFold
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn.pipeline import Pipeline
import sklearn
from sklearn import tree
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
from sklearn import tree
from sklearn.model_selection import GridSearchCV
from sklearn import metrics
from IPython.display import Image
from sklearn.metrics import classification_report
from sklearn.metrics import precision_recall_curve
import matplotlib.pyplot as plt
import random
from sklearn.ensemble import RandomForestClassifier
import eli5
from eli5.sklearn import PermutationImportance
from matplotlib import pyplot as plt
from pdpbox import pdp, get_dataset, info_plots
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import precision_score,recall_score,f1_score,roc_auc_score,roc_curve
import sys
import pymysql  # used by get_market_data below
EARTH_RADIUS=6371
# Common Utilities
def num2date(num):
# Convert eventid in GTD to standard time format
num = str(num)
d = num[:4]+'/'+num[4:6]+'/'+num[6:8]
tmp = dt.datetime.strptime(d, '%Y/%m/%d').date()
return tmp
def num2date_(num):
# Convert time of market data to standard time format
num = str(num)
d = num[:4]+'/'+num[5:7]+'/'+num[8:10]
tmp = dt.datetime.strptime(d, '%Y/%m/%d').date()
return tmp
def get_week_day(date):
day = date.weekday()
return day
def hav(theta):
s = sin(theta / 2)
return s * s
def get_distance_hav(lat0, lng0, lat1, lng1):
# The distance between two points of a sphere is calculated by the haversine formula
# Longitude and latitude convert to radians
lat0 = radians(lat0)
lat1 = radians(lat1)
lng0 = radians(lng0)
lng1 = radians(lng1)
dlng = fabs(lng0 - lng1)
dlat = fabs(lat0 - lat1)
h = hav(dlat) + cos(lat0) * cos(lat1) * hav(dlng)
distance = 2 * EARTH_RADIUS * asin(sqrt(h))
return distance
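# Example with real coordinates: Tel Aviv (lat 32.08, lon 34.78) to Jerusalem
# (lat 31.77, lon 35.21) gives roughly 53 km:
#   get_distance_hav(32.08, 34.78, 31.77, 35.21)   # ~53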
# Load the population density data - https://sedac.ciesin.columbia.edu/data/set/spatialecon-gecon-v4
def load_eco(filename,country):
basic_ec_file1 = filename
basic_ec = pd.read_excel(basic_ec_file1, country,header=0) # Load the page of Israel
lonlat_list = []
for i in range(basic_ec.shape[0]):
temp = []
temp.append(basic_ec.iloc[i]['LONGITUDE'])
temp.append(basic_ec.iloc[i]['LAT'])
lonlat_list.append(temp)
return lonlat_list
# Make terrorist attack features
def gtd_one_hot(gtd):
# Group the features at daily level
gtd_grouped = gtd.groupby(gtd['Timestamp']).sum()
# Occurrence measure
gtd_grouped['occur_count'] = gtd.groupby(gtd['Timestamp']).size()
# Maintain the max nightlight value each day
gtd_grouped['nightlight'] = gtd.groupby(gtd['Timestamp'])['nightlight'].max()
# Obtain the weekday of certain timestamp
gtd_grouped['week'] = gtd.groupby(gtd['Timestamp'])['week'].mean()
return gtd_grouped
def lag(df,col_name,count):
# Shift the column
for i in range(1,count+1):
df[col_name + '_' + str(i)] = df[col_name].shift(i)
return df
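# Illustrative use (hypothetical frame): lag(df, 'occur_count', 2) adds columns
# 'occur_count_1' and 'occur_count_2' holding the values shifted by one and two
# rows, i.e. the previous days' attack counts become features for the current day.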
def compute_nl(lon,lat):
# Map certain geographic position to corresponding value of nightlight intensity
round_lon = round((lon+180)*37.5)
round_lat = 6750-round((lat+90)*37.5)
try:
return nl[int(round_lat)][int(round_lon)]
except:
return 0
def contain_or_not(string,list_):
if string in list_:
return 1
else:
return 0
def adjust_week(timestamp,week):
# Adjust the weekend to friday
if week == 5:
return (timestamp+datetime.timedelta(days=2)).strftime("%Y/%m/%d")
elif week == 6:
return (timestamp+datetime.timedelta(days=1)).strftime("%Y/%m/%d")
return timestamp.strftime("%Y/%m/%d")
# Make the market features
def get_market_data(start,end,ref,goal,host,user,password,db):
con = pymysql.connect(host,user,password,db, charset='utf8' )
# Reference Index
cmd1 = "select * from " + ref + " where Timestamp >= " + start + ' and Timestamp <= ' + end
ref_df = pd.read_sql(cmd1, con)
#Goal Index
cmd2 = "select * from " + goal + " where Timestamp >= " + start + ' and Timestamp <= ' + end
goal_df = pd.read_sql(cmd2, con)
return ref_df,goal_df
def get_diff(origin_md,name):
md = origin_md.copy()
str1 = 'logdiff_' + name
str2 = 'twologdiff_' + name
md['close_shift1'] = md['Trade Close'].shift(1)
md['onediff'] = md['Trade Close'].diff()
md['open_shift_minus1'] = md['Trade Open'].shift(-1)
md['twodiff'] = md['open_shift_minus1']-md['close_shift1']
md = md.dropna()
md[str1] = md['onediff']/md['close_shift1'] < 0
md[str2] = md['twodiff']/md['close_shift1'] < 0
md_onediff = pd.DataFrame(md,columns = ['Timestamp',str1]).dropna()
md_twodiff = pd.DataFrame(md,columns = ['Timestamp',str2]).dropna()
return md_onediff,md_twodiff
# Merge terrorist attack features and market features
def diff_merge(gtd_grouped,diff_list):
for i in range(1,len(diff_list)):
diff_feature = pd.merge(diff_list[i-1],diff_list[i],on='Timestamp')
diff_feature = diff_feature.dropna()
diff_feature = pd.merge(gtd_grouped,diff_feature,on='Timestamp',how='right')
return diff_feature
def lag_part(feature,lag_features,lag_numbers):
for i in range(len(lag_features)):
feature = lag(feature,lag_features[i],lag_numbers[i])
return feature
def reset_df(df,target_col,index):
cols = list(df)
cols.insert(index, cols.pop(cols.index(target_col)))
df = df.loc[:, cols]
return df
def final_process(gtd_grouped,diff_list,lag_features,lag_numbers,target_col,future_drop_col):
feature = diff_merge(gtd_grouped,diff_list)
feature.sort_values("Timestamp",inplace=True)
feature = feature.fillna(0)
feature = lag_part(feature,lag_features,lag_numbers)
feature = reset_df(feature,target_col,len(feature.columns.values)-1)
feature = feature.drop(future_drop_col,axis=1)
feature.rename(columns={target_col: 'target'}, inplace = True)
feature = feature.dropna()
return feature
def train_test(features,split_point):
y = list(features['target'])
X = features.drop(['target','Timestamp'],axis=1)
x = X.values
var_list = list(X)
X_train,X_test,Y_train,Y_test = x[:split_point],x[split_point:],y[:split_point],y[split_point:]
return X_train,X_test,Y_train,Y_test,var_list
def pr(y_test,y_pred):
realminus = 0
predminus = 0
correct = 0
for ii in range(len(y_test)):
if y_test[ii] == True:
realminus += 1
if y_pred[ii] == True:
predminus += 1
if y_test[ii] == True and y_pred[ii] == True:
correct += 1
if predminus == 0:
precision = 1
else:
precision = correct/predminus
recall = correct/realminus
if recall == 0:
precision,recall = 1,0
return correct,predminus,correct,realminus
def split_pos_neg(feature,y_pred,cut_point):
    # Report performance separately for days with and for days without terrorist attacks
    testset = feature[cut_point:].copy()
testset = testset.reset_index()
pred_content = pd.Series(y_pred)
testset['pred'] = pred_content
testset1 = testset[(testset['occur_count'] >= 1)]
y_pred_ = list(testset1['pred'])
y_test_ = list(testset1['target'])
precision, recall = pr(y_test_,y_pred_)
f1 = 2*(precision*recall)/(precision+recall)
print(precision, ' ',recall,' ',f1)
print(classification_report(y_test_,y_pred_))
testset1 = testset[(testset['occur_count'] == 0)]
y_pred_ = list(testset1['pred'])
y_test_ = list(testset1['target'])
precision, recall = pr(y_test_,y_pred_)
f1 = 2*(precision*recall)/(precision+recall)
print(precision, ' ',recall,' ',f1)
print(classification_report(y_test_,y_pred_))
def best_para(x_train,x_val,y_train,y_val):
    # Grid search over max_depth and min_samples_leaf, scored by F1 on the validation split
    mf1=0
    mins=0
    maxd=0
    for j in range(5,10):
        for i in range(15,32):
            clf = tree.DecisionTreeClassifier(min_samples_leaf = i,max_depth = j)
            clf.fit(x_train, y_train)
            y_pred = clf.predict(x_val)
            precision, recall = pr(y_val,y_pred)
            f1 = 2*(precision*recall)/(precision+recall)
            if(f1>mf1):
                mf1=f1
                mins=i
                maxd=j
    return mf1,mins,maxd
def train_dt(feature,cut_point,samples_leaf=1,depth=100):
    x_train,x_test,y_train,y_test,var_list = train_test(feature,cut_point)
    clf = tree.DecisionTreeClassifier(min_samples_leaf = samples_leaf,max_depth = depth)
    clf.fit(x_train, y_train)
    y_pred = clf.predict(x_test)
    y_pred_pre = clf.predict_proba(x_test)
    print(classification_report(y_test,y_pred))
    im = clf.feature_importances_
    print(im)
    precision, recall = pr(y_test,y_pred)
    f1 = 2*(precision*recall)/(precision+recall)
    print(precision, ' ',recall,' ',f1)
    split_pos_neg(feature,y_pred,cut_point)
def experment_full_sample(feature, cut_point, val_cut_point):
    # Market Only Baseline - Exp-FS
    test = pd.DataFrame(feature, columns=['Timestamp','logdiff_sp500','logdiff_sp500_1','twologdiff_is100_1','twologdiff_is100_2','logdiff_sp500_2','target'])
    train_dt(test,cut_point)
    # Exp-FS: tune the tree on a validation split of the training data, then retrain with the best parameters
    x_train,x_val,y_train,y_val,var_list = train_test(feature[:cut_point],val_cut_point)
    _,mins,maxd = best_para(x_train,x_val,y_train,y_val)
    train_dt(feature,cut_point,samples_leaf=mins,depth=maxd)
def experment_terr(feature, cut_point):
    # Exp-Terr: restrict the sample to days with at least one terrorist attack
    feature_ = feature.copy()
    feature_ = feature_[(feature_['occur_count'] >= 1)]
    val_cut_point_terr = 320
    cut_point_terr = 415
    ## Market Only Baseline - Exp-Terr
    test = pd.DataFrame(feature_, columns=['Timestamp','logdiff_sp500','logdiff_sp500_1','twologdiff_is100_1','twologdiff_is100_2','logdiff_sp500_2','target'])
    train_dt(test,cut_point_terr)
    # Exp-Terr: tune on the validation split, then retrain with the best parameters
    x_train,x_val,y_train,y_val,var_list = train_test(feature_[:cut_point_terr],val_cut_point_terr)
    _,mins,maxd = best_para(x_train,x_val,y_train,y_val)
    train_dt(feature_,cut_point_terr,samples_leaf=mins,depth=maxd)
def one_step_ahead(feature):
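    # Walk-forward evaluation: for every attack day on or after `startdate`, retrain the
    # tree on a rolling window of earlier attack days and predict only that single day.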
# One step ahead - Need to load the terrorist attack data extract from news since startdate
# Merge GTD data prior to that startdata and terrorist attack data extract from news since startdate
gtd_news = pd.read_excel('reuters.xlsx','Israel')
## rechange the load GTD data part
gtd = gtd_original[gtd_original['country'] == 97]
gtd = gtd[gtd['iday']!=0]
gtd['Timestamp'] = gtd['eventid'].apply(num2date)
gtd = gtd[['Timestamp','latitude','longitude','nkill','nwound','city','provstate']]
gtd = gtd.dropna()
startdate = '2007-01-01'
gtd = gtd[gtd['Timestamp'] < dt.datetime.strptime(startdate, '%Y-%m-%d').date()]
gtd_news['Timestamp'] = gtd_news['Timestamp'].apply(num2date_)
gtd = pd.concat([gtd,gtd_news])
feature_all = feature.copy()
feature = feature[feature['occur_count'] != 0]
startdate = '2007-01-01'
feature_train = feature[feature['Timestamp'] < dt.datetime.strptime(startdate, '%Y-%m-%d').date()]
feature_test = feature[feature['Timestamp'] >= dt.datetime.strptime(startdate, '%Y-%m-%d').date()]
test_time = list(feature_test['Timestamp'])
# Market-only baseline and full-feature version for one-step-ahead
fall_count = 0
fall_predict_true = 0
fall_predict_count = 0
fall_predict_count_true = 0
for i in range(len(test_time)):
train_set = pd.concat([feature_train[-feature_train.shape[0]+i:], feature_test[0:i]])
test_set = feature_test[i:i+1]
test_set = test_set.drop([], 1)
# market-only version
# x_train,x_test,y_train,y_test,var_list_market = train_test(train_set[['Timestamp','logdiff_sp500','logdiff_sp500_1','twologdiff_is100_1','twologdiff_is100_2','logdiff_sp500_2','target']],train_set.shape[0])
# full-feature version
x_train,x_test,y_train,y_test,var_list = train_test(train_set,train_set.shape[0])
time = str((test_set['Timestamp'].values)[0])
y = list(test_set['target'])
# market-only version
# X = test_set[['logdiff_sp500','logdiff_sp500_1','twologdiff_is100_1','twologdiff_is100_2','logdiff_sp500_2']]
# full-feature version
X = test_set.drop(['target','Timestamp'],axis=1)
# market-only version
# clf = tree.DecisionTreeClassifier()
# full-feature version
clf = tree.DecisionTreeClassifier(min_samples_leaf = 26)
clf.fit(x_train, y_train)
y_pred = clf.predict(X)
if y == [1]:
fall_count += 1
if y_pred == [1]:
fall_predict_true += 1
if y_pred == [1]:
fall_predict_count += 1
if y == [1]:
fall_predict_count_true += 1
plusprecision = fall_predict_count_true/fall_predict_count
plusrecall = fall_predict_true/fall_count
f1 = 2*(plusprecision*plusrecall)/(plusprecision+plusrecall)
print(plusprecision,' ',plusrecall,' ',f1)
print(fall_predict_count_true,' ',fall_predict_count,' ',fall_predict_true,' ',fall_count)
def main(argv):
# Load the population density data - https://sedac.ciesin.columbia.edu/data/set/spatialecon-gecon-v4
lonlat_list = load_eco('basic_eco.xls',"Israel")
# Load the nightlight data - https://eoimages.gsfc.nasa.gov/images/imagerecords/144000/144897/BlackMarble_2016_3km_gray_geo.tif
gray_file = open("nightlight.csv","rb")
nl_tmp = np.loadtxt(gray_file,delimiter=',',skiprows=0)
gray_file.close()
nl = np.array(nl_tmp)
# Load the GTD data - https://www.start.umd.edu/gtd/
gtd_original = pd.read_excel('gtd90_17.xlsx')
gtd = gtd_original[gtd_original['country'] == 97]
gtd = gtd[gtd['iday']!=0]
gtd['Timestamp'] = gtd['eventid'].apply(num2date)
gtd = gtd[['Timestamp','latitude','longitude','nkill','nwound','city','provstate']]
gtd = gtd.dropna()
# capital/cultural center/religious center labels - From Wikipedia
capital = ['Jerusalem','Nazareth','Haifa','Ramla','Tel Aviv','Beersheva']
cultural_center = ['Tel Aviv']
religious_center = ['Jerusalem']
gtd['capital'] = gtd['city'].apply(contain_or_not,args=(capital,))
gtd['cultural_center'] = gtd['city'].apply(contain_or_not,args=(cultural_center,))
gtd['religious_center'] = gtd['city'].apply(contain_or_not,args=(religious_center,))
# One-hot encoding of provstate
    gtd = gtd.join(pd.get_dummies(gtd.provstate))
'''
Stage 1 selects the major cryptocurrencies based on the criterion
that their market capitalization exceeds $4bn.
Don't forget to set the folder in which this script is held
as the current working directory before running the script.
'''
# import needed libraries
import pandas as pd
import time
from tqdm import tqdm
pd.options.mode.chained_assignment = None
# read historical market cap table
df = pd.read_html('https://coinmarketcap.com/historical/20180429/')
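# pd.read_html returns a list of DataFrames, one per HTML table on the page.
# A minimal sketch of the $4bn screen (the table position and the 'Market Cap'
# column name are assumptions and should be checked against the parsed result):
#
#   snapshot = df[0]
#   snapshot['Market Cap'] = (snapshot['Market Cap']
#                             .str.replace(r'[\$,]', '', regex=True)
#                             .astype(float))
#   majors = snapshot[snapshot['Market Cap'] > 4e9]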
import os
import pandas as pd
import uproot3 as uproot
from tqdm import tqdm
import json
def check_integrity(basedir, period, samples, TreeName="selection", mode="normal", verbose=False):
"""
Check integrity of jobs results.
Args:
basedir (str): Path to analysis root folder
period (str): Jobs period used in anafile
TreeName (str): Tree name used in ROOTfile
samples (dict): Dictionary mapping each event flavour to job directories
systematics (dict): Dictionary defining the systematic universes
Returns:
tuple: (Dictonary with datasets statistics, list of old jobs, list of jobs with errors)
"""
Integrity_Jobs = []
Error_OldJobs = []
Error_Output = []
Resubmit_Jobs = []
if mode == "syst":
with open(os.path.join(basedir, "lateral_systematics.json")) as json_sys_file:
systematics = json.load(json_sys_file)
#print(systematics)
has_tag = False # Remove if CMS join 2016 samples again
for datasets in tqdm(samples.keys()):
count_good = 0
count_bad = 0
Nentries = 0
job = "None"
for dataset in samples[datasets]:
dataset_year = dataset.split("_files_")[0]
dataset_year = dataset_year.split("_")[-1]
dataset_tag = dataset.split("_"+dataset_year)[0][-3:]
if (dataset_year == period):
#print(dataset)
if( dataset_tag == "APV" ):
has_tag = True
control = 0
jobs_file = os.path.join(basedir, "jobs.txt")
with open(jobs_file) as f:
for line in f:
#if dataset == line[:-1]:
info = line.split(" ")
#print(info)
info_source = info[8].split(",")[0]
#info_universe = info[9].split("]")[0]
#print(info_source)
#print(info_universe)
job_line = info[2][3:-2] + "_files_" + info[6][:-1] + "_" + str(int(info[7][:-1])-1)
if( (dataset == job_line) and (info_source == "0") ):
control = 1
job = line
job = "[["+job.split("[[")[1]
if control == 0:
Error_OldJobs.append(dataset)
cutflow = os.path.join(basedir, dataset, "cutflow.txt")
bad_0_0 = False
control = 0
if os.path.isfile(cutflow):
N_entries = 0
with open(cutflow) as f:
for line in f:
control += line.count("Time to process the selection")
if line[:28] == "Number of entries considered" :
N_entries = int(line.split()[4])
try:
if control == 1 and N_entries > 0:
root_file = os.path.join(basedir, dataset, "Tree.root")
if os.path.isfile(root_file):
f = uproot.open(root_file)
if len(f.keys()) == 1:
#count_good += 1
tree = f[TreeName]
df = tree.pandas.df(flatten=False)
Nentries += len(df)
del df
#print("")
if mode == "syst":
sys_control = 0
for sys_source in systematics.keys():
sys_list = systematics[sys_source]
if( (sys_list[0] > 0) and (datasets[:4] == "Data") ):
continue
for universe in range(sys_list[1]):
#print(universe)
sys_file = str(sys_list[0]) + "_" + str(universe) + ".json"
sys_file = os.path.join(basedir, dataset, "Systematics", sys_file)
#print(sys_file)
#print(os.path.isfile(sys_file))
if os.path.isfile(sys_file):
#print(os.stat(sys_file).st_size > 0)
if os.stat(sys_file).st_size > 0:
with open(sys_file) as json_file:
sys_dict = json.load(json_file)
#print(sys_dict.keys())
if len(sys_dict) == 0:
sys_control += 1
else:
sys_control += 1
else:
sys_control += 1
if sys_control == 0:
count_good += 1
else:
count_bad += 1
Error_Output.append(dataset)
else:
count_good += 1
else:
count_bad += 1
Error_Output.append(dataset)
Resubmit_Jobs.append(job)
bad_0_0 = True
else:
count_bad += 1
Error_Output.append(dataset)
Resubmit_Jobs.append(job)
bad_0_0 = True
else:
count_bad += 1
Error_Output.append(dataset)
Resubmit_Jobs.append(job)
bad_0_0 = True
except Exception as ex:
print(str(ex), '---->', dataset)
count_bad += 1
Error_Output.append(dataset)
Resubmit_Jobs.append(job)
bad_0_0 = True
else:
count_bad += 1
Error_Output.append(dataset)
Resubmit_Jobs.append(job)
bad_0_0 = True
if mode == "syst":
for sys_source in systematics.keys():
if( (sys_source == "CV") and bad_0_0 ):
continue
else:
sys_list = systematics[sys_source]
if( (sys_list[0] > 0) and (datasets[:4] == "Data") ):
continue
for universe in range(sys_list[1]):
sys_file = str(sys_list[0]) + "_" + str(universe) + ".json"
sys_file = os.path.join(basedir, dataset, "Systematics", sys_file)
job_eff = job[:-7] + str(sys_list[0]) + ", " + str(universe) + "]," + "\n"
#print(job_eff)
if os.path.isfile(sys_file):
if os.stat(sys_file).st_size > 0:
temporary = 0
#with open(sys_file) as json_file:
#sys_dict = json.load(json_file)
#if len(sys_dict) == 0:
# Resubmit_Jobs.append(job)
else:
Resubmit_Jobs.append(job_eff)
else:
Resubmit_Jobs.append(job_eff)
Integrity_Jobs.append({
"Dataset": datasets,
"nFolders": len(samples[datasets]),
"Good": count_good,
"Bad": str(count_bad),
"Entries": Nentries
})
if len(Resubmit_Jobs) > 0:
if( has_tag ):
file_name = os.path.join(basedir, "resubmit_APV_" + period + ".txt")
else:
file_name = os.path.join(basedir, "resubmit_" + period + ".txt")
resubmit_file = open(file_name, "w")
for i in range(len(Resubmit_Jobs)):
resubmit_file.write(Resubmit_Jobs[i])
if verbose:
        Integrity_Jobs = pd.DataFrame(Integrity_Jobs)
import numpy as np
from ripser import ripser
from scipy.spatial.distance import pdist, squareform
from gudhi.clustering.tomato import Tomato
from umap import UMAP
import pandas as pd
from tqdm import tqdm
from cosine_hack import umap_hack
def calculate_persistence(
cluster, num_of_neurons, maxdim=1, coeff=47, num_longest_bars=10
):
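    # Embed the cluster (UMAP), compute 1-dimensional persistent homology with
    # coefficients in Z/coeff on the euclidean distance matrix of the embedding,
    # and return the lifetimes of the `num_longest_bars` longest H1 bars.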
print(cluster.shape[0])
if (num_of_neurons < 400) and (cluster.shape[0] > 4000):
try:
layout = umap_hack(
activity=cluster,
n_components=num_of_neurons,
verbose=True,
n_neighbors=20,
min_dist=0.01,
)
except KeyError:
return np.array([-1])
else:
layout = UMAP(
n_components=num_of_neurons,
verbose=True,
n_neighbors=20,
min_dist=0.01,
metric="cosine",
).fit_transform(cluster)
distance = squareform(pdist(layout, "euclidean"))
thresh = np.max(distance[~np.isinf(distance)])
diagrams = ripser(
X=distance,
maxdim=maxdim,
coeff=coeff,
do_cocycles=False,
distance_matrix=True,
thresh=thresh,
)["dgms"][1].T
births1 = diagrams[0] # the time of birth for the 1-dim classes
deaths1 = diagrams[1] # the time of death for the 1-dim classes
lives1 = deaths1 - births1 # the lifetime for the 1-dim classes
if len(lives1) > num_longest_bars:
iMax = np.argsort(lives1)
return lives1[iMax[-num_longest_bars:]]
else:
return lives1
def cluster_activity(activity):
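    # Embed the activations with UMAP (cosine metric via umap_hack) and cluster the
    # layout with ToMATo, targeting an average cluster size of ~1200 points.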
layout = umap_hack(
activity=activity,
n_components=activity.shape[1],
verbose=True,
n_neighbors=15,
min_dist=0.01,
)
# logDTM, DTM, ‘KDE’ or ‘logKDE’
    n_clusters = activity.shape[0] // 1200  # average cluster size of ~1200 points
return Tomato(density_type="logDTM", k=200, n_clusters=n_clusters).fit_predict(
layout
)
def find_circles(layer):
activity = np.load(f"activations/MNIST/{layer}.npy")
large_cluster_size = activity.shape[1]
clustering = cluster_activity(activity=activity)
unique, counts = np.unique(clustering, return_counts=True)
large_clusters = [
unique[i] for i, count in enumerate(counts) if count > large_cluster_size
]
    print(
        f"{len(unique)} clusters found. {len(large_clusters)} large clusters bigger than {large_cluster_size}."
    )
num_longest_bars, coeff = 10, 47
cluster_info = {
"cluster_id": [],
"cluster_size": [],
"cluster_members": [],
"longest_bar": [],
f"Top {num_longest_bars} longest bars": [],
}
pbar = tqdm(total=len(large_clusters))
for index in large_clusters:
cluster_members = np.array(
[n for n, cluster in enumerate(clustering) if cluster == index]
)
longest_bars = calculate_persistence(
cluster=activity[cluster_members],
num_of_neurons=activity.shape[1],
coeff=coeff,
num_longest_bars=num_longest_bars,
)
cluster_info["cluster_id"].append(index)
cluster_info["cluster_size"].append(cluster_members.shape[0])
cluster_info["cluster_members"].append(cluster_members)
cluster_info["longest_bar"].append(longest_bars.max())
cluster_info[f"Top {num_longest_bars} longest bars"].append(longest_bars)
pbar.update(1)
    df = pd.DataFrame.from_dict(data=cluster_info)
from experiment.general import general
from plots.rec_plots import precision_recall_curve
from utils.io import load_numpy, save_dataframe_csv, find_best_hyperparameters, load_yaml
from utils.modelnames import models
import argparse
import pandas as pd
import timeit
def main(args):
table_path = load_yaml('config/global.yml', key='path')['tables']
df = find_best_hyperparameters(table_path+args.tuning_result_path, 'NDCG')
R_train = load_numpy(path=args.data_dir, name=args.train_set)
R_valid = load_numpy(path=args.data_dir, name=args.valid_set)
R_test = load_numpy(path=args.data_dir, name=args.test_set)
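    # Final evaluation: fold the validation interactions back into the training matrix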
R_train = R_train + R_valid
topK = [5, 10, 15, 20, 50]
frame = []
for idx, row in df.iterrows():
start = timeit.default_timer()
row = row.to_dict()
row['metric'] = ['R-Precision', 'NDCG', 'Precision', 'Recall', "MAP"]
row['topK'] = topK
result = general(R_train,
R_test,
row,
models[row['model']],
measure=row['similarity'],
gpu_on=args.gpu,
model_folder=args.model_folder)
stop = timeit.default_timer()
print('Time: ', stop - start)
frame.append(result)
    results = pd.concat(frame)
import math
import os
from os.path import join as pjoin
import json
import copy
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import GPUtil
import pandas as pd
from multiprocessing import Pool
from tqdm import tqdm
import sklearn.metrics
from .config import print_config, class_labels
from .utils import (
anno_to_binary, cut_score, debug, display_imgs, info, gen_cwd_slash, labels_to_str, load_config, load_img,
np_macro_f1, str_to_labels, class_id_to_label, class_ids_to_label, combine_windows, chunk, compute_i_coords,
format_macro_f1_details, vec_to_str
)
# from .utils_heavy import predict, model_from_config
from .ignite_trainer import predict as predict
# def predict_and_save_scores(
# config,
# path_to_anno=None,
# path_to_imgs=None,
# save_scores_to=None,
# to_csv=None,
# ):
# model = model_from_config(config, which='latest')
# valid_anno = pd.read_csv(path_to_anno, index_col=0)
# predict(config)
# return valid_anno_predicted
def remove_scores_predicted(config):
cwd_slash = gen_cwd_slash(config)
pd.read_csv(cwd_slash('validation_predictions.csv'), index_col=0) \
.drop('Scores Predicted', 1) \
.to_csv(cwd_slash('validation_predictions.csv'))
def evaluate_validation_prediction(config):
info('evaluate_validation_prediction()')
cwd_slash = gen_cwd_slash(config)
anno = pd.read_csv(config['path_to_valid_anno_cache'], index_col=0, dtype=object)
prediction_df = pd.read_csv(cwd_slash('valid_predicted.csv'), index_col=0, dtype=object)
anno = anno.join(prediction_df, how='left')
# DEBUG BEGIN
anno.loc[:, ['Target', 'Predicted', 'folder', 'extension']].to_csv(cwd_slash('valid_anno_predicted.csv'))
# DEBUG END
y_true, y_pred = anno_to_binary(anno, config)
macro_f1_score, f1_details = np_macro_f1(y_true, y_pred, config, return_details=True)
print(format_macro_f1_details(f1_details, config))
print(f'macro_f1_score = {macro_f1_score}')
def final_corrections(config):
info('final_corrections()')
cwd_slash = gen_cwd_slash(config)
anno = pd.read_csv(cwd_slash('test_predicted.csv'), index_col=0)
# correct best submission [TODO: REMOVE: not for private leaderboard] --------------
# best_anno = pd.read_csv(cwd_slash('submission_587.csv'), index_col=0)
# rare_classes = [15, 27, 10, 8, 9, 17, 20, 24, 26]
# comparison_anno = anno.copy()
# comparison_anno['best'] = best_anno['Predicted']
# plot_imgs(
# config,
# comparison_anno.query('best != Predicted').sample(28),
# save_as='./tmp/best_submission_corrections.png',
# folder='data/test_minimaps',
# extension='jpg',
# )
# new_rows = []
# for id_, row in comparison_anno.iterrows():
# current_labels = str_to_labels(row['Predicted'])
# best_labels = str_to_labels(row['best'])
# for c in rare_classes:
# if c in current_labels and c not in best_labels:
# debug(f"removing {c} from {id_}")
# current_labels.remove(c)
# if c not in current_labels and c in best_labels:
# debug(f"adding {c} to {id_}")
# current_labels.append(c)
# new_row = {
# 'Id': id_,
# 'Predicted': labels_to_str(current_labels),
# }
# new_rows.append(new_row)
# anno = pd.DataFrame.from_records(new_rows).set_index('Id')
# debug(f"anno ({len(anno)}) =\n{anno.head(10)}")
# correct leaked --------------
# pairs_anno = pd.read_csv('data/identical_pairs.csv')
# hpa_anno = pd.read_csv('data/hpa_public_imgs.csv', index_col=0)
# correction_anno = pairs_anno.join(hpa_anno, how='left', on=['hpa_id'])\
# .join(anno, how='left', on=['test_id'])
# correction_anno['Target'] = [labels_to_str(str_to_labels(x)) for x in correction_anno['Target']]
# debug(f"correction_anno['test_id'] = {correction_anno['test_id']}")
# debug(f"len = {len(anno.loc[correction_anno['test_id'], 'Predicted'].values)}")
# correction_anno['Predicted'] = anno.loc[correction_anno['test_id'], 'Predicted'].values
# actual_corrections = correction_anno.query('Predicted != Target').set_index('test_id')
# # DEBUG BEGIN
# # plot_imgs(config, actual_corrections, folder='data/test_minimaps', extension='jpg')
# # DEBUG END
# debug(f"making {len(correction_anno)} corrections, {len(actual_corrections)} are actually different")
# debug(f"actual_corrections =\n{actual_corrections}")
# anno.loc[correction_anno['test_id'], 'Predicted'] = correction_anno['Target'].values
# correct leaked 2 --------------
pairs_anno = pd.read_csv('data/identical_pairs_new_fixed.csv')
for i_begin, i_end in chunk(len(pairs_anno), 24):
plot_imgs(
config,
pairs_anno.iloc[i_begin:i_end].drop('test_id', axis=1).set_index('hpa_id'),
save_as=f'./tmp/diff_{i_begin}_hpa.jpg',
folder='data/hpa_public_imgs',
extension='jpg',
background_color=None,
channel=None,
dpi=100,
)
plot_imgs(
config,
pairs_anno.iloc[i_begin:i_end].drop('hpa_id', axis=1).set_index('test_id'),
save_as=f'./tmp/diff_{i_begin}_test.jpg',
folder='data/test_full_size',
extension='tif',
background_color=None,
channel=['red', 'green', 'blue'],
dpi=100,
)
    hpa_anno = pd.read_csv('data/hpa_public_imgs.csv', index_col=0)
#!/usr/bin/env python
###############################################################################
# binning.py - A binning algorithm spinning off of the methodology of
# Lorikeet
###############################################################################
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
__author__ = "<NAME>"
__copyright__ = "Copyright 2020"
__credits__ = ["<NAME>"]
__license__ = "GPL3"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL> near hdr.qut.edu.au"
__status__ = "Development"
import json
import logging
###############################################################################
# System imports
import sys
import os
import matplotlib
import matplotlib.pyplot as plt
# Function imports
import numpy as np
import pandas as pd
import scipy.stats as sp_stats
import seaborn as sns
import skbio.stats.composition
import umap
from Bio import SeqIO
from numba import njit
from numpy import int64
# self imports
import flight.metrics as metrics
import flight.utils as utils
# Set plotting style
sns.set(style='white', context='notebook', rc={'figure.figsize': (14, 10)})
matplotlib.use('pdf')
# Debug
debug = {
1: logging.CRITICAL,
2: logging.ERROR,
3: logging.WARNING,
4: logging.INFO,
5: logging.DEBUG
}
###############################################################################
############################### - Exceptions - ################################
class BadTreeFileException(Exception):
pass
###############################################################################
################################ - Functions - ################################
@njit
def index(array, item):
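    # Return the position (as a tuple of indices) of the first element equal to
    # `item`, or None if the value does not occur.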
for idx, val in np.ndenumerate(array):
if val == item:
return idx
###############################################################################
################################ - Classes - ##################################
class Binner:
def __init__(
self,
count_path,
kmer_frequencies,
output_prefix,
assembly,
long_count_path=None,
n_neighbors=100,
min_dist=0.1,
min_contig_size=2500,
threads=8,
a=1.58,
b=0.4,
min_bin_size=200000,
initialization='spectral',
random_seed=42069
):
self.max_time_to_recluster_bin = 1800 # 30 mins
self.findem = []
self.min_contig_size = min_contig_size
self.min_bin_size = min_bin_size
self.threads = threads
self.checked_bins = [] # Used in the pdist function
self.survived = []
# Open up assembly
self.assembly = {}
self.assembly_names = {}
if assembly is not None:
for (tid, rec) in enumerate(SeqIO.parse(assembly, "fasta")):
self.assembly[rec.id] = tid
self.assembly_names[tid] = rec.id
# initialize bin dictionary Label: Vec<Contig>
self.bins = {}
self.bin_validity = {}
## Set up clusterer and UMAP
self.path = output_prefix
self.coverage_profile = None
self.kmer_signature = None
self.contig_lengths = None
## These tables should have the same ordering as each other if they came from rosella.
## I.e. all the rows match the same contig
if count_path is not None and long_count_path is not None:
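            # Depth columns sit at every other position from column 3 onwards (hence iloc[:, 3::2]);
            # a contig is kept as "large" only if it is long enough and covered in at least one
            # sample of both the short-read and the long-read tables.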
self.coverage_table = pd.read_csv(count_path, sep='\t')
self.long_depths = pd.read_csv(long_count_path, sep='\t')
self.coverage_table['coverageSum'] = (self.coverage_table.iloc[:, 3::2] > 0).any(axis=1)
self.long_depths['coverageSum'] = (self.long_depths.iloc[:, 3::2] > 0).any(axis=1)
self.large_contigs = self.coverage_table[(self.coverage_table["contigLen"] >= min_contig_size)
& ((self.coverage_table["coverageSum"])
& (self.long_depths["coverageSum"]))]
self.small_contigs = self.coverage_table[(self.coverage_table["contigLen"] < min_contig_size)
| ((~self.coverage_table["coverageSum"])
| (~self.long_depths["coverageSum"]))]
self.long_depths = self.long_depths[self.long_depths['contigName'].isin(self.large_contigs['contigName'])]
self.large_contigs = self.large_contigs.drop('coverageSum', axis=1)
self.small_contigs = self.small_contigs.drop('coverageSum', axis=1)
self.long_depths = self.long_depths.drop('coverageSum', axis=1)
self.large_contigs = pd.concat([self.large_contigs, self.long_depths.iloc[:, 3:]], axis = 1)
self.n_samples = len(self.large_contigs.columns[3::2])
self.long_samples = 0
self.short_sample_distance = utils.sample_distance(self.large_contigs)
self.long_sample_distance = utils.sample_distance(self.long_depths)
elif count_path is not None:
self.coverage_table = pd.read_csv(count_path, sep='\t')
self.coverage_table['coverageSum'] = (self.coverage_table.iloc[:, 3::2] > 0).any(axis=1)
self.large_contigs = self.coverage_table[(self.coverage_table["contigLen"] >= min_contig_size)
& (self.coverage_table["coverageSum"])]
self.small_contigs = self.coverage_table[(self.coverage_table["contigLen"] < min_contig_size)
| (~self.coverage_table["coverageSum"])]
self.large_contigs = self.large_contigs.drop('coverageSum', axis=1)
self.small_contigs = self.small_contigs.drop('coverageSum', axis=1)
self.short_sample_distance = utils.sample_distance(self.large_contigs)
self.n_samples = len(self.large_contigs.columns[3::2])
self.long_samples = 0
else:
## Treat long coverages as the default set
self.coverage_table = pd.read_csv(long_count_path, sep='\t')
self.coverage_table['coverageSum'] = (self.coverage_table.iloc[:, 3::2] > 0).any(axis=1)
self.large_contigs = self.coverage_table[(self.coverage_table["contigLen"] >= min_contig_size)
& (self.coverage_table["coverageSum"])]
self.small_contigs = self.coverage_table[(self.coverage_table["contigLen"] < min_contig_size)
| (~self.coverage_table["coverageSum"])]
self.large_contigs = self.large_contigs.drop('coverageSum', axis=1)
self.small_contigs = self.small_contigs.drop('coverageSum', axis=1)
self.long_sample_distance = utils.sample_distance(self.large_contigs)
self.long_samples = 0
self.n_samples = len(self.large_contigs.columns[3::2])
if assembly is None:
for (tid, rec) in enumerate(self.coverage_table['contigName']):
self.assembly[rec] = tid
self.assembly_names[tid] = rec
tids = []
for name in self.large_contigs['contigName']:
tids.append(self.assembly[name])
self.large_contigs['tid'] = tids
## Handle TNFs
        self.tnfs = pd.read_csv(kmer_frequencies, sep='\t')
from datetime import (
datetime,
timedelta,
)
import re
import numpy as np
import pytest
from pandas._libs import iNaT
from pandas.errors import InvalidIndexError
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_integer
import pandas as pd
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
Series,
Timestamp,
date_range,
isna,
notna,
)
import pandas._testing as tm
import pandas.core.common as com
# We pass through a TypeError raised by numpy
_slice_msg = "slice indices must be integers or None or have an __index__ method"
class TestDataFrameIndexing:
def test_getitem(self, float_frame):
# Slicing
sl = float_frame[:20]
assert len(sl.index) == 20
# Column access
for _, series in sl.items():
assert len(series.index) == 20
assert tm.equalContents(series.index, sl.index)
for key, _ in float_frame._series.items():
assert float_frame[key] is not None
assert "random" not in float_frame
with pytest.raises(KeyError, match="random"):
float_frame["random"]
def test_getitem2(self, float_frame):
df = float_frame.copy()
df["$10"] = np.random.randn(len(df))
ad = np.random.randn(len(df))
df["@awesome_domain"] = ad
with pytest.raises(KeyError, match=re.escape("'df[\"$10\"]'")):
df.__getitem__('df["$10"]')
res = df["@awesome_domain"]
tm.assert_numpy_array_equal(ad, res.values)
def test_setitem_list(self, float_frame):
float_frame["E"] = "foo"
data = float_frame[["A", "B"]]
float_frame[["B", "A"]] = data
tm.assert_series_equal(float_frame["B"], data["A"], check_names=False)
tm.assert_series_equal(float_frame["A"], data["B"], check_names=False)
msg = "Columns must be same length as key"
with pytest.raises(ValueError, match=msg):
data[["A"]] = float_frame[["A", "B"]]
newcolumndata = range(len(data.index) - 1)
msg = (
rf"Length of values \({len(newcolumndata)}\) "
rf"does not match length of index \({len(data)}\)"
)
with pytest.raises(ValueError, match=msg):
data["A"] = newcolumndata
def test_setitem_list2(self):
df = DataFrame(0, index=range(3), columns=["tt1", "tt2"], dtype=np.int_)
df.loc[1, ["tt1", "tt2"]] = [1, 2]
result = df.loc[df.index[1], ["tt1", "tt2"]]
expected = Series([1, 2], df.columns, dtype=np.int_, name=1)
tm.assert_series_equal(result, expected)
df["tt1"] = df["tt2"] = "0"
df.loc[df.index[1], ["tt1", "tt2"]] = ["1", "2"]
result = df.loc[df.index[1], ["tt1", "tt2"]]
expected = Series(["1", "2"], df.columns, name=1)
tm.assert_series_equal(result, expected)
def test_getitem_boolean(self, mixed_float_frame, mixed_int_frame, datetime_frame):
# boolean indexing
d = datetime_frame.index[10]
indexer = datetime_frame.index > d
indexer_obj = indexer.astype(object)
subindex = datetime_frame.index[indexer]
subframe = datetime_frame[indexer]
tm.assert_index_equal(subindex, subframe.index)
with pytest.raises(ValueError, match="Item wrong length"):
datetime_frame[indexer[:-1]]
subframe_obj = datetime_frame[indexer_obj]
tm.assert_frame_equal(subframe_obj, subframe)
with pytest.raises(ValueError, match="Boolean array expected"):
datetime_frame[datetime_frame]
# test that Series work
indexer_obj = Series(indexer_obj, datetime_frame.index)
subframe_obj = datetime_frame[indexer_obj]
tm.assert_frame_equal(subframe_obj, subframe)
# test that Series indexers reindex
# we are producing a warning that since the passed boolean
# key is not the same as the given index, we will reindex
# not sure this is really necessary
with tm.assert_produces_warning(UserWarning):
indexer_obj = indexer_obj.reindex(datetime_frame.index[::-1])
subframe_obj = datetime_frame[indexer_obj]
tm.assert_frame_equal(subframe_obj, subframe)
# test df[df > 0]
for df in [
datetime_frame,
mixed_float_frame,
mixed_int_frame,
]:
data = df._get_numeric_data()
bif = df[df > 0]
bifw = DataFrame(
{c: np.where(data[c] > 0, data[c], np.nan) for c in data.columns},
index=data.index,
columns=data.columns,
)
# add back other columns to compare
for c in df.columns:
if c not in bifw:
bifw[c] = df[c]
bifw = bifw.reindex(columns=df.columns)
tm.assert_frame_equal(bif, bifw, check_dtype=False)
for c in df.columns:
if bif[c].dtype != bifw[c].dtype:
assert bif[c].dtype == df[c].dtype
def test_getitem_boolean_casting(self, datetime_frame):
# don't upcast if we don't need to
df = datetime_frame.copy()
df["E"] = 1
df["E"] = df["E"].astype("int32")
df["E1"] = df["E"].copy()
df["F"] = 1
df["F"] = df["F"].astype("int64")
df["F1"] = df["F"].copy()
casted = df[df > 0]
result = casted.dtypes
expected = Series(
[np.dtype("float64")] * 4
+ [np.dtype("int32")] * 2
+ [np.dtype("int64")] * 2,
index=["A", "B", "C", "D", "E", "E1", "F", "F1"],
)
tm.assert_series_equal(result, expected)
# int block splitting
df.loc[df.index[1:3], ["E1", "F1"]] = 0
casted = df[df > 0]
result = casted.dtypes
expected = Series(
[np.dtype("float64")] * 4
+ [np.dtype("int32")]
+ [np.dtype("float64")]
+ [np.dtype("int64")]
+ [np.dtype("float64")],
index=["A", "B", "C", "D", "E", "E1", "F", "F1"],
)
tm.assert_series_equal(result, expected)
def test_getitem_boolean_list(self):
df = DataFrame(np.arange(12).reshape(3, 4))
def _checkit(lst):
result = df[lst]
expected = df.loc[df.index[lst]]
tm.assert_frame_equal(result, expected)
_checkit([True, False, True])
_checkit([True, True, True])
_checkit([False, False, False])
def test_getitem_boolean_iadd(self):
arr = np.random.randn(5, 5)
df = DataFrame(arr.copy(), columns=["A", "B", "C", "D", "E"])
df[df < 0] += 1
arr[arr < 0] += 1
tm.assert_almost_equal(df.values, arr)
def test_boolean_index_empty_corner(self):
# #2096
blah = DataFrame(np.empty([0, 1]), columns=["A"], index=DatetimeIndex([]))
# both of these should succeed trivially
k = np.array([], bool)
blah[k]
blah[k] = 0
def test_getitem_ix_mixed_integer(self):
df = DataFrame(
np.random.randn(4, 3), index=[1, 10, "C", "E"], columns=[1, 2, 3]
)
result = df.iloc[:-1]
expected = df.loc[df.index[:-1]]
tm.assert_frame_equal(result, expected)
result = df.loc[[1, 10]]
expected = df.loc[Index([1, 10])]
tm.assert_frame_equal(result, expected)
def test_getitem_ix_mixed_integer2(self):
# 11320
df = DataFrame(
{
"rna": (1.5, 2.2, 3.2, 4.5),
-1000: [11, 21, 36, 40],
0: [10, 22, 43, 34],
1000: [0, 10, 20, 30],
},
columns=["rna", -1000, 0, 1000],
)
result = df[[1000]]
expected = df.iloc[:, [3]]
tm.assert_frame_equal(result, expected)
result = df[[-1000]]
expected = df.iloc[:, [1]]
tm.assert_frame_equal(result, expected)
def test_getattr(self, float_frame):
tm.assert_series_equal(float_frame.A, float_frame["A"])
msg = "'DataFrame' object has no attribute 'NONEXISTENT_NAME'"
with pytest.raises(AttributeError, match=msg):
float_frame.NONEXISTENT_NAME
def test_setattr_column(self):
df = DataFrame({"foobar": 1}, index=range(10))
df.foobar = 5
assert (df.foobar == 5).all()
def test_setitem(self, float_frame):
# not sure what else to do here
series = float_frame["A"][::2]
float_frame["col5"] = series
assert "col5" in float_frame
assert len(series) == 15
assert len(float_frame) == 30
exp = np.ravel(np.column_stack((series.values, [np.nan] * 15)))
exp = Series(exp, index=float_frame.index, name="col5")
tm.assert_series_equal(float_frame["col5"], exp)
series = float_frame["A"]
float_frame["col6"] = series
tm.assert_series_equal(series, float_frame["col6"], check_names=False)
# set ndarray
arr = np.random.randn(len(float_frame))
float_frame["col9"] = arr
assert (float_frame["col9"] == arr).all()
float_frame["col7"] = 5
assert (float_frame["col7"] == 5).all()
float_frame["col0"] = 3.14
assert (float_frame["col0"] == 3.14).all()
float_frame["col8"] = "foo"
assert (float_frame["col8"] == "foo").all()
# this is partially a view (e.g. some blocks are view)
# so raise/warn
smaller = float_frame[:2]
msg = r"\nA value is trying to be set on a copy of a slice from a DataFrame"
with pytest.raises(com.SettingWithCopyError, match=msg):
smaller["col10"] = ["1", "2"]
assert smaller["col10"].dtype == np.object_
assert (smaller["col10"] == ["1", "2"]).all()
def test_setitem2(self):
# dtype changing GH4204
df = DataFrame([[0, 0]])
df.iloc[0] = np.nan
expected = DataFrame([[np.nan, np.nan]])
tm.assert_frame_equal(df, expected)
df = DataFrame([[0, 0]])
df.loc[0] = np.nan
tm.assert_frame_equal(df, expected)
def test_setitem_boolean(self, float_frame):
df = float_frame.copy()
values = float_frame.values
df[df["A"] > 0] = 4
values[values[:, 0] > 0] = 4
tm.assert_almost_equal(df.values, values)
# test that column reindexing works
series = df["A"] == 4
series = series.reindex(df.index[::-1])
df[series] = 1
values[values[:, 0] == 4] = 1
tm.assert_almost_equal(df.values, values)
df[df > 0] = 5
values[values > 0] = 5
tm.assert_almost_equal(df.values, values)
df[df == 5] = 0
values[values == 5] = 0
tm.assert_almost_equal(df.values, values)
# a df that needs alignment first
df[df[:-1] < 0] = 2
np.putmask(values[:-1], values[:-1] < 0, 2)
tm.assert_almost_equal(df.values, values)
# indexed with same shape but rows-reversed df
df[df[::-1] == 2] = 3
values[values == 2] = 3
tm.assert_almost_equal(df.values, values)
msg = "Must pass DataFrame or 2-d ndarray with boolean values only"
with pytest.raises(TypeError, match=msg):
df[df * 0] = 2
# index with DataFrame
mask = df > np.abs(df)
expected = df.copy()
df[df > np.abs(df)] = np.nan
expected.values[mask.values] = np.nan
tm.assert_frame_equal(df, expected)
# set from DataFrame
expected = df.copy()
df[df > np.abs(df)] = df * 2
np.putmask(expected.values, mask.values, df.values * 2)
tm.assert_frame_equal(df, expected)
def test_setitem_cast(self, float_frame):
float_frame["D"] = float_frame["D"].astype("i8")
assert float_frame["D"].dtype == np.int64
# #669, should not cast?
# this is now set to int64, which means a replacement of the column to
# the value dtype (and nothing to do with the existing dtype)
float_frame["B"] = 0
assert float_frame["B"].dtype == np.int64
# cast if pass array of course
float_frame["B"] = np.arange(len(float_frame))
assert issubclass(float_frame["B"].dtype.type, np.integer)
float_frame["foo"] = "bar"
float_frame["foo"] = 0
assert float_frame["foo"].dtype == np.int64
float_frame["foo"] = "bar"
float_frame["foo"] = 2.5
assert float_frame["foo"].dtype == np.float64
float_frame["something"] = 0
assert float_frame["something"].dtype == np.int64
float_frame["something"] = 2
assert float_frame["something"].dtype == np.int64
float_frame["something"] = 2.5
assert float_frame["something"].dtype == np.float64
def test_setitem_corner(self, float_frame):
# corner case
df = DataFrame({"B": [1.0, 2.0, 3.0], "C": ["a", "b", "c"]}, index=np.arange(3))
del df["B"]
df["B"] = [1.0, 2.0, 3.0]
assert "B" in df
assert len(df.columns) == 2
df["A"] = "beginning"
df["E"] = "foo"
df["D"] = "bar"
df[datetime.now()] = "date"
df[datetime.now()] = 5.0
# what to do when empty frame with index
dm = DataFrame(index=float_frame.index)
dm["A"] = "foo"
dm["B"] = "bar"
assert len(dm.columns) == 2
assert dm.values.dtype == np.object_
# upcast
dm["C"] = 1
assert dm["C"].dtype == np.int64
dm["E"] = 1.0
assert dm["E"].dtype == np.float64
# set existing column
dm["A"] = "bar"
assert "bar" == dm["A"][0]
dm = DataFrame(index=np.arange(3))
dm["A"] = 1
dm["foo"] = "bar"
del dm["foo"]
dm["foo"] = "bar"
assert dm["foo"].dtype == np.object_
dm["coercible"] = ["1", "2", "3"]
assert dm["coercible"].dtype == np.object_
def test_setitem_corner2(self):
data = {
"title": ["foobar", "bar", "foobar"] + ["foobar"] * 17,
"cruft": np.random.random(20),
}
df = DataFrame(data)
ix = df[df["title"] == "bar"].index
df.loc[ix, ["title"]] = "foobar"
df.loc[ix, ["cruft"]] = 0
assert df.loc[1, "title"] == "foobar"
assert df.loc[1, "cruft"] == 0
def test_setitem_ambig(self):
# Difficulties with mixed-type data
from decimal import Decimal
# Created as float type
dm = DataFrame(index=range(3), columns=range(3))
coercable_series = Series([Decimal(1) for _ in range(3)], index=range(3))
uncoercable_series = Series(["foo", "bzr", "baz"], index=range(3))
dm[0] = np.ones(3)
assert len(dm.columns) == 3
dm[1] = coercable_series
assert len(dm.columns) == 3
dm[2] = uncoercable_series
assert len(dm.columns) == 3
assert dm[2].dtype == np.object_
def test_setitem_None(self, float_frame):
# GH #766
float_frame[None] = float_frame["A"]
tm.assert_series_equal(
float_frame.iloc[:, -1], float_frame["A"], check_names=False
)
tm.assert_series_equal(
float_frame.loc[:, None], float_frame["A"], check_names=False
)
tm.assert_series_equal(float_frame[None], float_frame["A"], check_names=False)
repr(float_frame)
def test_loc_setitem_boolean_mask_allfalse(self):
# GH 9596
df = DataFrame(
{"a": ["1", "2", "3"], "b": ["11", "22", "33"], "c": ["111", "222", "333"]}
)
result = df.copy()
result.loc[result.b.isna(), "a"] = result.a
tm.assert_frame_equal(result, df)
def test_getitem_fancy_slice_integers_step(self):
df = DataFrame(np.random.randn(10, 5))
# this is OK
result = df.iloc[:8:2] # noqa
df.iloc[:8:2] = np.nan
assert isna(df.iloc[:8:2]).values.all()
def test_getitem_setitem_integer_slice_keyerrors(self):
df = DataFrame(np.random.randn(10, 5), index=range(0, 20, 2))
# this is OK
cp = df.copy()
cp.iloc[4:10] = 0
assert (cp.iloc[4:10] == 0).values.all()
# so is this
cp = df.copy()
cp.iloc[3:11] = 0
assert (cp.iloc[3:11] == 0).values.all()
result = df.iloc[2:6]
result2 = df.loc[3:11]
expected = df.reindex([4, 6, 8, 10])
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
# non-monotonic, raise KeyError
df2 = df.iloc[list(range(5)) + list(range(5, 10))[::-1]]
with pytest.raises(KeyError, match=r"^3$"):
df2.loc[3:11]
with pytest.raises(KeyError, match=r"^3$"):
df2.loc[3:11] = 0
@td.skip_array_manager_invalid_test # already covered in test_iloc_col_slice_view
def test_fancy_getitem_slice_mixed(self, float_frame, float_string_frame):
sliced = float_string_frame.iloc[:, -3:]
assert sliced["D"].dtype == np.float64
# get view with single block
# setting it triggers setting with copy
sliced = float_frame.iloc[:, -3:]
assert np.shares_memory(sliced["C"]._values, float_frame["C"]._values)
msg = r"\nA value is trying to be set on a copy of a slice from a DataFrame"
with pytest.raises(com.SettingWithCopyError, match=msg):
sliced.loc[:, "C"] = 4.0
assert (float_frame["C"] == 4).all()
def test_getitem_setitem_non_ix_labels(self):
df = tm.makeTimeDataFrame()
start, end = df.index[[5, 10]]
result = df.loc[start:end]
result2 = df[start:end]
expected = df[5:11]
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
result = df.copy()
result.loc[start:end] = 0
result2 = df.copy()
result2[start:end] = 0
expected = df.copy()
expected[5:11] = 0
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
def test_ix_multi_take(self):
df = DataFrame(np.random.randn(3, 2))
rs = df.loc[df.index == 0, :]
xp = df.reindex([0])
tm.assert_frame_equal(rs, xp)
# GH#1321
df = DataFrame(np.random.randn(3, 2))
rs = df.loc[df.index == 0, df.columns == 1]
xp = df.reindex(index=[0], columns=[1])
tm.assert_frame_equal(rs, xp)
def test_getitem_fancy_scalar(self, float_frame):
f = float_frame
ix = f.loc
# individual value
for col in f.columns:
ts = f[col]
for idx in f.index[::5]:
assert ix[idx, col] == ts[idx]
@td.skip_array_manager_invalid_test # TODO(ArrayManager) rewrite not using .values
def test_setitem_fancy_scalar(self, float_frame):
f = float_frame
expected = float_frame.copy()
ix = f.loc
# individual value
for j, col in enumerate(f.columns):
ts = f[col] # noqa
for idx in f.index[::5]:
i = f.index.get_loc(idx)
val = np.random.randn()
expected.values[i, j] = val
ix[idx, col] = val
tm.assert_frame_equal(f, expected)
def test_getitem_fancy_boolean(self, float_frame):
f = float_frame
ix = f.loc
expected = f.reindex(columns=["B", "D"])
result = ix[:, [False, True, False, True]]
tm.assert_frame_equal(result, expected)
expected = f.reindex(index=f.index[5:10], columns=["B", "D"])
result = ix[f.index[5:10], [False, True, False, True]]
tm.assert_frame_equal(result, expected)
boolvec = f.index > f.index[7]
expected = f.reindex(index=f.index[boolvec])
result = ix[boolvec]
tm.assert_frame_equal(result, expected)
result = ix[boolvec, :]
tm.assert_frame_equal(result, expected)
result = ix[boolvec, f.columns[2:]]
expected = f.reindex(index=f.index[boolvec], columns=["C", "D"])
tm.assert_frame_equal(result, expected)
@td.skip_array_manager_invalid_test # TODO(ArrayManager) rewrite not using .values
def test_setitem_fancy_boolean(self, float_frame):
# from 2d, set with booleans
frame = float_frame.copy()
expected = float_frame.copy()
mask = frame["A"] > 0
frame.loc[mask] = 0.0
expected.values[mask.values] = 0.0
tm.assert_frame_equal(frame, expected)
frame = float_frame.copy()
expected = float_frame.copy()
frame.loc[mask, ["A", "B"]] = 0.0
expected.values[mask.values, :2] = 0.0
tm.assert_frame_equal(frame, expected)
def test_getitem_fancy_ints(self, float_frame):
result = float_frame.iloc[[1, 4, 7]]
expected = float_frame.loc[float_frame.index[[1, 4, 7]]]
tm.assert_frame_equal(result, expected)
result = float_frame.iloc[:, [2, 0, 1]]
expected = float_frame.loc[:, float_frame.columns[[2, 0, 1]]]
tm.assert_frame_equal(result, expected)
def test_getitem_setitem_boolean_misaligned(self, float_frame):
# boolean index misaligned labels
mask = float_frame["A"][::-1] > 1
result = float_frame.loc[mask]
expected = float_frame.loc[mask[::-1]]
tm.assert_frame_equal(result, expected)
cp = float_frame.copy()
expected = float_frame.copy()
cp.loc[mask] = 0
expected.loc[mask] = 0
tm.assert_frame_equal(cp, expected)
def test_getitem_setitem_boolean_multi(self):
df = DataFrame(np.random.randn(3, 2))
# get
k1 = np.array([True, False, True])
k2 = np.array([False, True])
result = df.loc[k1, k2]
expected = df.loc[[0, 2], [1]]
tm.assert_frame_equal(result, expected)
expected = df.copy()
df.loc[np.array([True, False, True]), np.array([False, True])] = 5
expected.loc[[0, 2], [1]] = 5
tm.assert_frame_equal(df, expected)
def test_getitem_setitem_float_labels(self):
index = Index([1.5, 2, 3, 4, 5])
df = DataFrame(np.random.randn(5, 5), index=index)
result = df.loc[1.5:4]
expected = df.reindex([1.5, 2, 3, 4])
tm.assert_frame_equal(result, expected)
assert len(result) == 4
result = df.loc[4:5]
expected = df.reindex([4, 5]) # reindex with int
tm.assert_frame_equal(result, expected, check_index_type=False)
assert len(result) == 2
result = df.loc[4:5]
expected = df.reindex([4.0, 5.0]) # reindex with float
tm.assert_frame_equal(result, expected)
assert len(result) == 2
# loc_float changes this to work properly
result = df.loc[1:2]
expected = df.iloc[0:2]
tm.assert_frame_equal(result, expected)
df.loc[1:2] = 0
result = df[1:2]
assert (result == 0).all().all()
# #2727
index = Index([1.0, 2.5, 3.5, 4.5, 5.0])
df = DataFrame(np.random.randn(5, 5), index=index)
# positional slicing only via iloc!
msg = (
"cannot do positional indexing on Float64Index with "
r"these indexers \[1.0\] of type float"
)
with pytest.raises(TypeError, match=msg):
df.iloc[1.0:5]
result = df.iloc[4:5]
expected = df.reindex([5.0])
tm.assert_frame_equal(result, expected)
assert len(result) == 1
cp = df.copy()
with pytest.raises(TypeError, match=_slice_msg):
cp.iloc[1.0:5] = 0
with pytest.raises(TypeError, match=msg):
result = cp.iloc[1.0:5] == 0
assert result.values.all()
assert (cp.iloc[0:1] == df.iloc[0:1]).values.all()
cp = df.copy()
cp.iloc[4:5] = 0
assert (cp.iloc[4:5] == 0).values.all()
assert (cp.iloc[0:4] == df.iloc[0:4]).values.all()
# float slicing
result = df.loc[1.0:5]
expected = df
tm.assert_frame_equal(result, expected)
assert len(result) == 5
result = df.loc[1.1:5]
expected = df.reindex([2.5, 3.5, 4.5, 5.0])
tm.assert_frame_equal(result, expected)
assert len(result) == 4
result = df.loc[4.51:5]
expected = df.reindex([5.0])
tm.assert_frame_equal(result, expected)
assert len(result) == 1
result = df.loc[1.0:5.0]
expected = df.reindex([1.0, 2.5, 3.5, 4.5, 5.0])
tm.assert_frame_equal(result, expected)
assert len(result) == 5
cp = df.copy()
cp.loc[1.0:5.0] = 0
result = cp.loc[1.0:5.0]
assert (result == 0).values.all()
def test_setitem_single_column_mixed_datetime(self):
df = DataFrame(
np.random.randn(5, 3),
index=["a", "b", "c", "d", "e"],
columns=["foo", "bar", "baz"],
)
df["timestamp"] = Timestamp("20010102")
# check our dtypes
result = df.dtypes
expected = Series(
[np.dtype("float64")] * 3 + [np.dtype("datetime64[ns]")],
index=["foo", "bar", "baz", "timestamp"],
)
tm.assert_series_equal(result, expected)
# GH#16674 iNaT is treated as an integer when given by the user
df.loc["b", "timestamp"] = iNaT
assert not isna(df.loc["b", "timestamp"])
assert df["timestamp"].dtype == np.object_
assert df.loc["b", "timestamp"] == iNaT
# allow this syntax (as of GH#3216)
df.loc["c", "timestamp"] = np.nan
assert isna(df.loc["c", "timestamp"])
# allow this syntax
df.loc["d", :] = np.nan
assert not isna(df.loc["c", :]).all()
def test_setitem_mixed_datetime(self):
# GH 9336
expected = DataFrame(
{
"a": [0, 0, 0, 0, 13, 14],
"b": [
datetime(2012, 1, 1),
1,
"x",
"y",
datetime(2013, 1, 1),
datetime(2014, 1, 1),
],
}
)
df = DataFrame(0, columns=list("ab"), index=range(6))
df["b"] = pd.NaT
df.loc[0, "b"] = datetime(2012, 1, 1)
df.loc[1, "b"] = 1
df.loc[[2, 3], "b"] = "x", "y"
A = np.array(
[
[13, np.datetime64("2013-01-01T00:00:00")],
[14, np.datetime64("2014-01-01T00:00:00")],
]
)
df.loc[[4, 5], ["a", "b"]] = A
tm.assert_frame_equal(df, expected)
def test_setitem_frame_float(self, float_frame):
piece = float_frame.loc[float_frame.index[:2], ["A", "B"]]
float_frame.loc[float_frame.index[-2] :, ["A", "B"]] = piece.values
result = float_frame.loc[float_frame.index[-2:], ["A", "B"]].values
expected = piece.values
tm.assert_almost_equal(result, expected)
def test_setitem_frame_mixed(self, float_string_frame):
# GH 3216
# already aligned
f = float_string_frame.copy()
piece = DataFrame(
[[1.0, 2.0], [3.0, 4.0]], index=f.index[0:2], columns=["A", "B"]
)
key = (f.index[slice(None, 2)], ["A", "B"])
f.loc[key] = piece
tm.assert_almost_equal(f.loc[f.index[0:2], ["A", "B"]].values, piece.values)
def test_setitem_frame_mixed_rows_unaligned(self, float_string_frame):
# GH#3216 rows unaligned
f = float_string_frame.copy()
piece = DataFrame(
[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0]],
index=list(f.index[0:2]) + ["foo", "bar"],
columns=["A", "B"],
)
key = (f.index[slice(None, 2)], ["A", "B"])
f.loc[key] = piece
tm.assert_almost_equal(
f.loc[f.index[0:2:], ["A", "B"]].values, piece.values[0:2]
)
def test_setitem_frame_mixed_key_unaligned(self, float_string_frame):
# GH#3216 key is unaligned with values
f = float_string_frame.copy()
piece = f.loc[f.index[:2], ["A"]]
piece.index = f.index[-2:]
key = (f.index[slice(-2, None)], ["A", "B"])
f.loc[key] = piece
piece["B"] = np.nan
tm.assert_almost_equal(f.loc[f.index[-2:], ["A", "B"]].values, piece.values)
def test_setitem_frame_mixed_ndarray(self, float_string_frame):
# GH#3216 ndarray
f = float_string_frame.copy()
piece = float_string_frame.loc[f.index[:2], ["A", "B"]]
key = (f.index[slice(-2, None)], ["A", "B"])
f.loc[key] = piece.values
tm.assert_almost_equal(f.loc[f.index[-2:], ["A", "B"]].values, piece.values)
def test_setitem_frame_upcast(self):
# needs upcasting
df = DataFrame([[1, 2, "foo"], [3, 4, "bar"]], columns=["A", "B", "C"])
df2 = df.copy()
df2.loc[:, ["A", "B"]] = df.loc[:, ["A", "B"]] + 0.5
expected = df.reindex(columns=["A", "B"])
expected += 0.5
expected["C"] = df["C"]
tm.assert_frame_equal(df2, expected)
def test_setitem_frame_align(self, float_frame):
piece = float_frame.loc[float_frame.index[:2], ["A", "B"]]
piece.index = float_frame.index[-2:]
piece.columns = ["A", "B"]
float_frame.loc[float_frame.index[-2:], ["A", "B"]] = piece
result = float_frame.loc[float_frame.index[-2:], ["A", "B"]].values
expected = piece.values
tm.assert_almost_equal(result, expected)
def test_getitem_setitem_ix_duplicates(self):
# #1201
df = DataFrame(np.random.randn(5, 3), index=["foo", "foo", "bar", "baz", "bar"])
result = df.loc["foo"]
expected = df[:2]
tm.assert_frame_equal(result, expected)
result = df.loc["bar"]
expected = df.iloc[[2, 4]]
tm.assert_frame_equal(result, expected)
result = df.loc["baz"]
expected = df.iloc[3]
tm.assert_series_equal(result, expected)
def test_getitem_ix_boolean_duplicates_multiple(self):
# #1201
df = DataFrame(np.random.randn(5, 3), index=["foo", "foo", "bar", "baz", "bar"])
result = df.loc[["bar"]]
exp = df.iloc[[2, 4]]
tm.assert_frame_equal(result, exp)
result = df.loc[df[1] > 0]
exp = df[df[1] > 0]
tm.assert_frame_equal(result, exp)
result = df.loc[df[0] > 0]
exp = df[df[0] > 0]
tm.assert_frame_equal(result, exp)
@pytest.mark.parametrize("bool_value", [True, False])
def test_getitem_setitem_ix_bool_keyerror(self, bool_value):
# #2199
df = DataFrame({"a": [1, 2, 3]})
message = f"{bool_value}: boolean label can not be used without a boolean index"
with pytest.raises(KeyError, match=message):
df.loc[bool_value]
msg = "cannot use a single bool to index into setitem"
with pytest.raises(KeyError, match=msg):
df.loc[bool_value] = 0
# TODO: rename? remove?
def test_single_element_ix_dont_upcast(self, float_frame):
float_frame["E"] = 1
assert issubclass(float_frame["E"].dtype.type, (int, np.integer))
result = float_frame.loc[float_frame.index[5], "E"]
assert is_integer(result)
# GH 11617
df = DataFrame({"a": [1.23]})
df["b"] = 666
result = df.loc[0, "b"]
assert is_integer(result)
expected = Series([666], [0], name="b")
result = df.loc[[0], "b"]
tm.assert_series_equal(result, expected)
def test_iloc_row(self):
df = DataFrame(np.random.randn(10, 4), index=range(0, 20, 2))
result = df.iloc[1]
exp = df.loc[2]
tm.assert_series_equal(result, exp)
result = df.iloc[2]
exp = df.loc[4]
tm.assert_series_equal(result, exp)
# slice
result = df.iloc[slice(4, 8)]
expected = df.loc[8:14]
tm.assert_frame_equal(result, expected)
# list of integers
result = df.iloc[[1, 2, 4, 6]]
expected = df.reindex(df.index[[1, 2, 4, 6]])
tm.assert_frame_equal(result, expected)
def test_iloc_row_slice_view(self, using_array_manager):
df = DataFrame(np.random.randn(10, 4), index=range(0, 20, 2))
original = df.copy()
# verify slice is view
# setting it makes it raise/warn
subset = df.iloc[slice(4, 8)]
assert np.shares_memory(df[2], subset[2])
msg = r"\nA value is trying to be set on a copy of a slice from a DataFrame"
with pytest.raises(com.SettingWithCopyError, match=msg):
subset.loc[:, 2] = 0.0
exp_col = original[2].copy()
# TODO(ArrayManager) verify it is expected that the original didn't change
if not using_array_manager:
exp_col[4:8] = 0.0
tm.assert_series_equal(df[2], exp_col)
def test_iloc_col(self):
df = DataFrame(np.random.randn(4, 10), columns=range(0, 20, 2))
result = df.iloc[:, 1]
exp = df.loc[:, 2]
tm.assert_series_equal(result, exp)
result = df.iloc[:, 2]
exp = df.loc[:, 4]
tm.assert_series_equal(result, exp)
# slice
result = df.iloc[:, slice(4, 8)]
expected = df.loc[:, 8:14]
tm.assert_frame_equal(result, expected)
# list of integers
result = df.iloc[:, [1, 2, 4, 6]]
expected = df.reindex(columns=df.columns[[1, 2, 4, 6]])
tm.assert_frame_equal(result, expected)
def test_iloc_col_slice_view(self, using_array_manager):
df = DataFrame(np.random.randn(4, 10), columns=range(0, 20, 2))
original = df.copy()
subset = df.iloc[:, slice(4, 8)]
if not using_array_manager:
# verify slice is view
assert np.shares_memory(df[8]._values, subset[8]._values)
# and that we are setting a copy
msg = r"\nA value is trying to be set on a copy of a slice from a DataFrame"
with pytest.raises(com.SettingWithCopyError, match=msg):
subset.loc[:, 8] = 0.0
assert (df[8] == 0).all()
else:
# TODO(ArrayManager) verify this is the desired behaviour
subset[8] = 0.0
# subset changed
assert (subset[8] == 0).all()
# but df itself did not change (setitem replaces full column)
tm.assert_frame_equal(df, original)
def test_loc_duplicates(self):
# gh-17105
# insert a duplicate element to the index
trange = date_range(
start=Timestamp(year=2017, month=1, day=1),
end=Timestamp(year=2017, month=1, day=5),
)
trange = trange.insert(loc=5, item=Timestamp(year=2017, month=1, day=5))
df = DataFrame(0, index=trange, columns=["A", "B"])
bool_idx = np.array([False, False, False, False, False, True])
# assignment
df.loc[trange[bool_idx], "A"] = 6
expected = DataFrame(
{"A": [0, 0, 0, 0, 6, 6], "B": [0, 0, 0, 0, 0, 0]}, index=trange
)
tm.assert_frame_equal(df, expected)
# in-place
df = DataFrame(0, index=trange, columns=["A", "B"])
df.loc[trange[bool_idx], "A"] += 6
tm.assert_frame_equal(df, expected)
def test_setitem_with_unaligned_tz_aware_datetime_column(self):
# GH 12981
# Assignment of unaligned offset-aware datetime series.
# Make sure timezone isn't lost
column = Series(date_range("2015-01-01", periods=3, tz="utc"), name="dates")
df = DataFrame({"dates": column})
df["dates"] = column[[1, 0, 2]]
tm.assert_series_equal(df["dates"], column)
df = DataFrame({"dates": column})
df.loc[[0, 1, 2], "dates"] = column[[1, 0, 2]]
tm.assert_series_equal(df["dates"], column)
def test_loc_setitem_datetimelike_with_inference(self):
# GH 7592
# assignment of timedeltas with NaT
one_hour = timedelta(hours=1)
df = DataFrame(index=date_range("20130101", periods=4))
df["A"] = np.array([1 * one_hour] * 4, dtype="m8[ns]")
df.loc[:, "B"] = np.array([2 * one_hour] * 4, dtype="m8[ns]")
df.loc[df.index[:3], "C"] = np.array([3 * one_hour] * 3, dtype="m8[ns]")
df.loc[:, "D"] = np.array([4 * one_hour] * 4, dtype="m8[ns]")
df.loc[df.index[:3], "E"] = np.array([5 * one_hour] * 3, dtype="m8[ns]")
df["F"] = np.timedelta64("NaT")
df.loc[df.index[:-1], "F"] = np.array([6 * one_hour] * 3, dtype="m8[ns]")
df.loc[df.index[-3] :, "G"] = date_range("20130101", periods=3)
df["H"] = np.datetime64("NaT")
result = df.dtypes
expected = Series(
[np.dtype("timedelta64[ns]")] * 6 + [np.dtype("datetime64[ns]")] * 2,
index=list("ABCDEFGH"),
)
tm.assert_series_equal(result, expected)
def test_getitem_boolean_indexing_mixed(self):
df = DataFrame(
{
0: {35: np.nan, 40: np.nan, 43: np.nan, 49: np.nan, 50: np.nan},
1: {
35: np.nan,
40: 0.32632316859446198,
43: np.nan,
49: 0.32632316859446198,
50: 0.39114724480578139,
},
2: {
35: np.nan,
40: np.nan,
43: 0.29012581014105987,
49: np.nan,
50: np.nan,
},
3: {35: np.nan, 40: np.nan, 43: np.nan, 49: np.nan, 50: np.nan},
4: {
35: 0.34215328467153283,
40: np.nan,
43: np.nan,
49: np.nan,
50: np.nan,
},
"y": {35: 0, 40: 0, 43: 0, 49: 0, 50: 1},
}
)
# mixed int/float ok
df2 = df.copy()
df2[df2 > 0.3] = 1
expected = df.copy()
expected.loc[40, 1] = 1
expected.loc[49, 1] = 1
expected.loc[50, 1] = 1
expected.loc[35, 4] = 1
tm.assert_frame_equal(df2, expected)
df["foo"] = "test"
msg = "not supported between instances|unorderable types"
with pytest.raises(TypeError, match=msg):
df[df > 0.3] = 1
def test_type_error_multiindex(self):
# See gh-12218
mi = MultiIndex.from_product([["x", "y"], [0, 1]], names=[None, "c"])
dg = DataFrame(
[[1, 1, 2, 2], [3, 3, 4, 4]], columns=mi, index=Index([0, 1], name="i")
)
with pytest.raises(InvalidIndexError, match="slice"):
dg[:, 0]
index = Index(range(2), name="i")
columns = MultiIndex(
levels=[["x", "y"], [0, 1]], codes=[[0, 1], [0, 0]], names=[None, "c"]
)
expected = DataFrame([[1, 2], [3, 4]], columns=columns, index=index)
result = dg.loc[:, (slice(None), 0)]
tm.assert_frame_equal(result, expected)
name = ("x", 0)
index = Index(range(2), name="i")
expected = Series([1, 3], index=index, name=name)
result = dg["x", 0]
tm.assert_series_equal(result, expected)
def test_getitem_interval_index_partial_indexing(self):
# GH#36490
df = DataFrame(
np.ones((3, 4)), columns=pd.IntervalIndex.from_breaks(np.arange(5))
)
expected = df.iloc[:, 0]
res = df[0.5]
tm.assert_series_equal(res, expected)
res = df.loc[:, 0.5]
tm.assert_series_equal(res, expected)
def test_setitem_array_as_cell_value(self):
# GH#43422
df = DataFrame(columns=["a", "b"], dtype=object)
df.loc[0] = {"a": np.zeros((2,)), "b": np.zeros((2, 2))}
expected = DataFrame({"a": [np.zeros((2,))], "b": [np.zeros((2, 2))]})
tm.assert_frame_equal(df, expected)
# with AM goes through split-path, loses dtype
@td.skip_array_manager_not_yet_implemented
def test_iloc_setitem_nullable_2d_values(self):
df = DataFrame({"A": [1, 2, 3]}, dtype="Int64")
orig = df.copy()
df.loc[:] = df.values[:, ::-1]
tm.assert_frame_equal(df, orig)
df.loc[:] = pd.core.arrays.PandasArray(df.values[:, ::-1])
tm.assert_frame_equal(df, orig)
df.iloc[:] = df.iloc[:, :]
tm.assert_frame_equal(df, orig)
@pytest.mark.parametrize(
"null", [pd.NaT, pd.NaT.to_numpy("M8[ns]"), pd.NaT.to_numpy("m8[ns]")]
)
def test_setting_mismatched_na_into_nullable_fails(
self, null, any_numeric_ea_dtype
):
# GH#44514 don't cast mismatched nulls to pd.NA
df = DataFrame({"A": [1, 2, 3]}, dtype=any_numeric_ea_dtype)
ser = df["A"]
arr = ser._values
msg = "|".join(
[
r"int\(\) argument must be a string, a bytes-like object or a "
"(real )?number, not 'NaTType'",
r"timedelta64\[ns\] cannot be converted to an? (Floating|Integer)Dtype",
r"datetime64\[ns\] cannot be converted to an? (Floating|Integer)Dtype",
"object cannot be converted to a FloatingDtype",
"'values' contains non-numeric NA",
]
)
with pytest.raises(TypeError, match=msg):
arr[0] = null
with pytest.raises(TypeError, match=msg):
arr[:2] = [null, null]
with pytest.raises(TypeError, match=msg):
ser[0] = null
with pytest.raises(TypeError, match=msg):
ser[:2] = [null, null]
with pytest.raises(TypeError, match=msg):
ser.iloc[0] = null
with pytest.raises(TypeError, match=msg):
ser.iloc[:2] = [null, null]
with pytest.raises(TypeError, match=msg):
df.iloc[0, 0] = null
with pytest.raises(TypeError, match=msg):
df.iloc[:2, 0] = [null, null]
# Multi-Block
df2 = df.copy()
df2["B"] = ser.copy()
with pytest.raises(TypeError, match=msg):
df2.iloc[0, 0] = null
with pytest.raises(TypeError, match=msg):
df2.iloc[:2, 0] = [null, null]
def test_loc_expand_empty_frame_keep_index_name(self):
# GH#45621
df = DataFrame(columns=["b"], index=Index([], name="a"))
df.loc[0] = 1
expected = DataFrame({"b": [1]}, index=Index([0], name="a"))
tm.assert_frame_equal(df, expected)
def test_loc_expand_empty_frame_keep_midx_names(self):
# GH#46317
df = DataFrame(
columns=["d"], index=MultiIndex.from_tuples([], names=["a", "b", "c"])
)
df.loc[(1, 2, 3)] = "foo"
expected = DataFrame(
{"d": ["foo"]},
index=MultiIndex.from_tuples([(1, 2, 3)], names=["a", "b", "c"]),
)
tm.assert_frame_equal(df, expected)
class TestDataFrameIndexingUInt64:
def test_setitem(self, uint64_frame):
df = uint64_frame
idx = df["A"].rename("foo")
# setitem
assert "C" not in df.columns
df["C"] = idx
tm.assert_series_equal(df["C"], Series(idx, name="C"))
assert "D" not in df.columns
df["D"] = "foo"
df["D"] = idx
tm.assert_series_equal(df["D"], Series(idx, name="D"))
del df["D"]
# With NaN: because uint64 has no NaN element,
# the column should be cast to object.
df2 = df.copy()
df2.iloc[1, 1] = pd.NaT
df2.iloc[1, 2] = pd.NaT
result = df2["B"]
tm.assert_series_equal(notna(result), Series([True, False, True], name="B"))
tm.assert_series_equal(
df2.dtypes,
Series(
[np.dtype("uint64"), np.dtype("O"), np.dtype("O")],
index=["A", "B", "C"],
),
)
def test_object_casting_indexing_wraps_datetimelike(using_array_manager):
# GH#31649, check the indexing methods all the way down the stack
df = DataFrame(
{
"A": [1, 2],
"B": date_range("2000", periods=2),
"C": pd.timedelta_range("1 Day", periods=2),
}
)
ser = df.loc[0]
assert isinstance(ser.values[1], Timestamp)
assert isinstance(ser.values[2], pd.Timedelta)
ser = df.iloc[0]
assert isinstance(ser.values[1], Timestamp)
assert isinstance(ser.values[2], pd.Timedelta)
ser = df.xs(0, axis=0)
assert isinstance(ser.values[1], Timestamp)
assert isinstance(ser.values[2], pd.Timedelta)
if using_array_manager:
# remainder of the test checking BlockManager internals
return
mgr = df._mgr
mgr._rebuild_blknos_and_blklocs()
arr = mgr.fast_xs(0)
assert isinstance(arr[1], Timestamp)
assert isinstance(arr[2], pd.Timedelta)
blk = mgr.blocks[mgr.blknos[1]]
assert blk.dtype == "M8[ns]" # we got the right block
val = blk.iget((0, 0))
assert isinstance(val, Timestamp)
blk = mgr.blocks[mgr.blknos[2]]
assert blk.dtype == "m8[ns]" # we got the right block
val = blk.iget((0, 0))
assert isinstance(val, pd.Timedelta)
msg1 = r"Cannot setitem on a Categorical with a new category( \(.*\))?, set the"
msg2 = "Cannot set a Categorical with another, without identical categories"
class TestLocILocDataFrameCategorical:
@pytest.fixture
def orig(self):
cats = Categorical(["a", "a", "a", "a", "a", "a", "a"], categories=["a", "b"])
idx = Index(["h", "i", "j", "k", "l", "m", "n"])
values = [1, 1, 1, 1, 1, 1, 1]
        orig = DataFrame({"cats": cats, "values": values}, index=idx)
        return orig
"""Evaluate multiple models in multiple experiments, or evaluate baseline on multiple datasets
TODO: use hydra or another model to manage the experiments
"""
import os
import sys
import json
import argparse
import logging
from glob import glob
import time
import string
logging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s', level=logging.INFO)
import numpy as np
import pandas as pd
import h5py
import scipy
import scipy.interpolate
import scipy.stats
import torch
import dill
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.ticker as ticker
import matplotlib.colors as colors
import matplotlib.patheffects as pe
import matplotlib.animation as animation
from tqdm import tqdm
from tabulate import tabulate
import utility as util
from helper import load_model, prediction_output_to_trajectories
pd.set_option('io.hdf.default_format','table')
############################
# Bag-of-N (BoN) FDE metrics
############################
def compute_min_FDE(predict, future):
return np.min(np.linalg.norm(predict[...,-1,:] - future[-1], axis=-1))
def compute_min_ADE(predict, future):
mean_ades = np.mean(np.linalg.norm(predict - future, axis=-1), axis=-1)
return np.min(mean_ades)
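# Illustrative sanity check for the Bo-N helpers above (assumed shapes: `predict` is
# (n_predictions, horizon, 2) and `future` is (horizon, 2)):
#   predict = np.zeros((20, 12, 2)); future = np.ones((12, 2))
#   compute_min_FDE(predict, future) == compute_min_ADE(predict, future) == np.sqrt(2)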
def evaluate_scene_BoN(scene, ph, eval_stg, hyp, n_predictions=20, min_fde=True, min_ade=True):
predictconfig = util.AttrDict(ph=ph, num_samples=n_predictions, z_mode=False, gmm_mode=False,
full_dist=False, all_z_sep=False)
max_hl = hyp['maximum_history_length']
with torch.no_grad():
predictions = eval_stg.predict(scene,
np.arange(scene.timesteps), predictconfig.ph,
num_samples=predictconfig.num_samples,
min_future_timesteps=predictconfig.ph,
z_mode=predictconfig.z_mode,
gmm_mode=predictconfig.gmm_mode,
full_dist=predictconfig.full_dist,
all_z_sep=predictconfig.all_z_sep)
prediction_dict, histories_dict, futures_dict = \
prediction_output_to_trajectories(
predictions, dt=scene.dt, max_h=max_hl, ph=predictconfig.ph, map=None)
batch_metrics = {'min_ade': list(), 'min_fde': list()}
for t in prediction_dict.keys():
for node in prediction_dict[t].keys():
if min_ade:
batch_metrics['min_ade'].append(compute_min_ADE(prediction_dict[t][node], futures_dict[t][node]))
if min_fde:
batch_metrics['min_fde'].append(compute_min_FDE(prediction_dict[t][node], futures_dict[t][node]))
return batch_metrics
def evaluate_BoN(env, ph, eval_stg, hyp, n_predictions=20, min_fde=True, min_ade=True):
batch_metrics = {'min_ade': list(), 'min_fde': list()}
prefix = f"Evaluate Bo{n_predictions} (ph = {ph}): "
for scene in tqdm(env.scenes, desc=prefix, dynamic_ncols=True, leave=True):
_batch_metrics = evaluate_scene_BoN(scene, ph, eval_stg, hyp,
n_predictions=n_predictions, min_fde=min_fde, min_ade=min_ade)
batch_metrics['min_ade'].extend(_batch_metrics['min_ade'])
batch_metrics['min_fde'].extend(_batch_metrics['min_fde'])
return batch_metrics
###############
# Other metrics
###############
def make_interpolate_map(scene):
map = scene.map['VEHICLE']
obs_map = 1 - np.max(map.data[..., :, :, :], axis=-3) / 255
interp_obs_map = scipy.interpolate.RectBivariateSpline(
range(obs_map.shape[0]),
range(obs_map.shape[1]),
obs_map, kx=1, ky=1)
return interp_obs_map
def compute_num_offroad_viols(interp_map, scene_map, predicted_trajs):
"""Count the number of predicted trajectories that go off the road.
Note this does not count trajectories that go over road/lane dividers.
Parameters
==========
interp_map : scipy.interpolate.RectBivariateSpline
Interpolation to get road obstacle indicator value from predicted points.
scene_map : trajectron.environment.GeometricMap
Map transform the predicted points to map coordinates.
predicted_trajs : ndarray
Predicted trajectories of shape (number of predictions, number of timesteps, 2).
Returns
=======
int
A value between [0, number of predictions].
"""
old_shape = predicted_trajs.shape
pred_trajs_map = scene_map.to_map_points(predicted_trajs.reshape((-1, 2)))
traj_values = interp_map(pred_trajs_map[:, 0], pred_trajs_map[:, 1], grid=False)
# traj_values has shape (1, num_samples, ph).
traj_values = traj_values.reshape((old_shape[0], old_shape[1], old_shape[2]))
# num_viol_trajs is an integer in [0, num_samples].
return np.sum(traj_values.max(axis=2) > 0, dtype=float)
def compute_kde_nll(predicted_trajs, gt_traj):
kde_ll = 0.
log_pdf_lower_bound = -20
num_timesteps = gt_traj.shape[0]
num_batches = predicted_trajs.shape[0]
for batch_num in range(num_batches):
for timestep in range(num_timesteps):
try:
kde = scipy.stats.gaussian_kde(predicted_trajs[batch_num, :, timestep].T)
                pdf = np.clip(kde.logpdf(gt_traj[timestep].T), a_min=log_pdf_lower_bound, a_max=None)[0]
kde_ll += pdf / (num_timesteps * num_batches)
except np.linalg.LinAlgError:
kde_ll = np.nan
return -kde_ll
def compute_ade(predicted_trajs, gt_traj):
error = np.linalg.norm(predicted_trajs - gt_traj, axis=-1)
ade = np.mean(error, axis=-1)
return ade.flatten()
def compute_fde(predicted_trajs, gt_traj):
final_error = np.linalg.norm(predicted_trajs[:, :, -1] - gt_traj[-1], axis=-1)
return final_error.flatten()
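# The three metrics above operate on arrays of sampled trajectories whose last axis is the
# (x, y) position and whose second-to-last axis is the prediction horizon; compute_ade and
# compute_fde return one error per sampled trajectory, while compute_kde_nll returns a
# single scalar averaged over timesteps and samples.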
########################
# Most Likely Evaluation
########################
def evaluate_scene_most_likely(scene, ph, eval_stg, hyp,
ade=True, fde=True):
predictconfig = util.AttrDict(ph=ph, num_samples=1,
z_mode=True, gmm_mode=True, full_dist=False, all_z_sep=False)
max_hl = hyp['maximum_history_length']
with torch.no_grad():
predictions = eval_stg.predict(scene,
np.arange(scene.timesteps), predictconfig.ph,
num_samples=predictconfig.num_samples,
min_future_timesteps=predictconfig.ph,
z_mode=predictconfig.z_mode,
gmm_mode=predictconfig.gmm_mode,
full_dist=predictconfig.full_dist,
all_z_sep=predictconfig.all_z_sep)
prediction_dict, histories_dict, futures_dict = \
prediction_output_to_trajectories(
predictions, dt=scene.dt, max_h=max_hl, ph=predictconfig.ph, map=None)
batch_metrics = {'ade': list(), 'fde': list()}
for t in prediction_dict.keys():
for node in prediction_dict[t].keys():
if ade:
batch_metrics['ade'].extend(
compute_ade(prediction_dict[t][node], futures_dict[t][node]) )
if fde:
batch_metrics['fde'].extend(
compute_fde(prediction_dict[t][node], futures_dict[t][node]) )
return batch_metrics
def evaluate_most_likely(env, ph, eval_stg, hyp,
ade=True, fde=True):
batch_metrics = {'ade': list(), 'fde': list()}
prefix = f"Evaluate Most Likely (ph = {ph}): "
for scene in tqdm(env.scenes, desc=prefix, dynamic_ncols=True, leave=True):
_batch_metrics = evaluate_scene_most_likely(scene, ph, eval_stg, hyp,
ade=ade, fde=fde)
batch_metrics['ade'].extend(_batch_metrics['ade'])
batch_metrics['fde'].extend(_batch_metrics['fde'])
return batch_metrics
#################
# Full Evaluation
#################
def evaluate_scene_full(scene, ph, eval_stg, hyp,
ade=True, fde=True, kde=True, offroad_viols=True):
num_samples = 2000
predictconfig = util.AttrDict(ph=ph, num_samples=num_samples,
z_mode=False, gmm_mode=False, full_dist=False, all_z_sep=False)
max_hl = hyp['maximum_history_length']
with torch.no_grad():
predictions = eval_stg.predict(scene,
np.arange(scene.timesteps), predictconfig.ph,
num_samples=predictconfig.num_samples,
min_future_timesteps=predictconfig.ph,
z_mode=predictconfig.z_mode,
gmm_mode=predictconfig.gmm_mode,
full_dist=predictconfig.full_dist,
all_z_sep=predictconfig.all_z_sep)
prediction_dict, histories_dict, futures_dict = \
prediction_output_to_trajectories(
predictions, dt=scene.dt, max_h=max_hl, ph=predictconfig.ph, map=None)
interp_map = make_interpolate_map(scene)
map = scene.map['VEHICLE']
batch_metrics = {'ade': list(), 'fde': list(), 'kde': list(), 'offroad_viols': list()}
for t in prediction_dict.keys():
for node in prediction_dict[t].keys():
if ade:
batch_metrics['ade'].extend(
compute_ade(prediction_dict[t][node], futures_dict[t][node]) )
if fde:
batch_metrics['fde'].extend(
compute_fde(prediction_dict[t][node], futures_dict[t][node]) )
if offroad_viols:
batch_metrics['offroad_viols'].extend(
[ compute_num_offroad_viols(interp_map, map, prediction_dict[t][node]) / float(num_samples) ])
if kde:
batch_metrics['kde'].extend(
[ compute_kde_nll(prediction_dict[t][node], futures_dict[t][node]) ])
return batch_metrics
def evaluate_full(env, ph, eval_stg, hyp,
ade=True, fde=True, kde=True, offroad_viols=True):
batch_metrics = {'ade': list(), 'fde': list(), 'kde': list(), 'offroad_viols': list()}
prefix = f"Evaluate Full (ph = {ph}): "
for scene in tqdm(env.scenes, desc=prefix, dynamic_ncols=True, leave=True):
_batch_metrics = evaluate_scene_full(scene, ph, eval_stg, hyp,
ade=ade, fde=fde, kde=kde, offroad_viols=offroad_viols)
batch_metrics['ade'].extend(_batch_metrics['ade'])
batch_metrics['fde'].extend(_batch_metrics['fde'])
batch_metrics['kde'].extend(_batch_metrics['kde'])
batch_metrics['offroad_viols'].extend(_batch_metrics['offroad_viols'])
return batch_metrics
##########
# Datasets
##########
dataset_dir = "../../.."
dataset_1 = util.AttrDict(
test_set_path=f"{ dataset_dir }/carla_v3-1_dataset/v3-1_split1_test.pkl",
name='v3-1_split1_test',
desc="CARLA synthesized dataset with heading fix, occlusion fix, and 32 timesteps.")
dataset_2 = util.AttrDict(
test_set_path=f"{ dataset_dir }/carla_v3-1-1_dataset/v3-1-1_split1_test.pkl",
name='v3-1-1_split1_test',
desc="CARLA synthesized dataset with heading fix, occlusion fix, and 32 timesteps.")
dataset_3 = util.AttrDict(
test_set_path=f"{ dataset_dir }/carla_v3-1-2_dataset/v3-1-2_split1_test.pkl",
name='v3-1-2_split1_test',
desc="CARLA synthesized dataset with heading fix, occlusion fix, and 32 timesteps.")
DATASETS = [dataset_1, dataset_2, dataset_3]
def load_dataset(dataset):
logging.info(f"Loading dataset: {dataset.name}: {dataset.desc}")
with open(dataset.test_set_path, 'rb') as f:
eval_env = dill.load(f, encoding='latin1')
return eval_env
#############
# Experiments
#############
"""
The experiments to evaluate are:
- 20210621 one model trained on NuScenes to use as baseline for other evaluation
- 20210801 have models trained from v3-1-1 (train set has 200 scenes). Compare MapV2, MapV3.
- 20210802 have models trained from v3-1-1. MapV5 squeezes map encoding to size 32 using FC.
- 20210803 have models trained from v3-1-1. Compare map, mapV4. MapV4 with multi K values. MapV4 does not apply FC. May have size 100 or 150.
- 20210804 have models trained from v3-1 (train set has 300 scenes). Compare map with mapV4.
- 20210805 have models trained from v3-1 (train set has 300 scenes). MapV4 with multi K values.
- 20210812 have models trained from v3-1-1 rebalanced. Models are trained 20 epochs.
- 20210815 have models trained from v3-1-1 rebalanced. Models are trained 40 epochs.
- 20210816 have models trained from v3-1-2 (train set has 600 scenes) rebalanced.
"""
model_dir = "models"
baseline_model = util.AttrDict(
path=f"{ model_dir }/20210621/models_19_Mar_2021_22_14_19_int_ee_me_ph8",
desc="Base model +Dynamics Integration, Maps with K=25 latent values "
"(on NuScenes dataset)")
experiment_1 = util.AttrDict(
models_dir=f"{ model_dir }/20210801",
dataset=dataset_2,
desc="20210801 have models trained from v3-1-1 (train set has 200 scenes). Compare MapV2, MapV3.")
experiment_2 = util.AttrDict(
models_dir=f"{ model_dir }/20210802",
dataset=dataset_2,
desc="20210802 have models trained from v3-1-1. MapV5 squeezes map encoding to size 32 using FC.")
experiment_3 = util.AttrDict(
models_dir=f"{ model_dir }/20210803",
dataset=dataset_2,
desc="20210803 have models trained from v3-1-1. Compare map, mapV4. MapV4 with multi K values. "
"MapV4 does not apply FC. May have size 100 or 150.")
experiment_4 = util.AttrDict(
models_dir=f"{ model_dir }/20210804",
dataset=dataset_1,
desc="20210804 have models trained from v3-1 (train set has 300 scenes). Compare map with mapV4.")
experiment_5 = util.AttrDict(
models_dir=f"{ model_dir }/20210805",
dataset=dataset_1,
desc="20210805 have models trained from v3-1 (train set has 300 scenes). MapV4 with multi K values.")
experiment_6 = util.AttrDict(
models_dir=f"{ model_dir }/20210812",
dataset=dataset_2,
desc="20210812 have models trained from v3-1-1 rebalanced. Models are trained 20 epochs.")
experiment_7 = util.AttrDict(
models_dir=f"{ model_dir }/20210815",
dataset=dataset_2, ts=40,
desc="20210815 have models trained from v3-1-1 rebalanced. Models are trained 40 epochs.")
experiment_8 = util.AttrDict(
models_dir=f"{ model_dir }/20210816",
dataset=dataset_3,
desc="20210816 have models trained from v3-1-2 (train set has 600 scenes) rebalanced.")
EXPERIMENTS = [experiment_1, experiment_2, experiment_3, experiment_4, experiment_5, experiment_6, experiment_7, experiment_8]
def _load_model(model_path, eval_env, ts=20):
eval_stg, hyp = load_model(model_path, eval_env, ts=ts)#, device='cuda')
return eval_stg, hyp
PREDICTION_HORIZONS = [2,4,6,8]
def run_evaluate_experiments(config):
if config.experiment_index is not None and config.experiment_index >= 1:
experiments = [EXPERIMENTS[config.experiment_index - 1]]
else:
experiments = EXPERIMENTS
######################
# Evaluate experiments
######################
# results_filename = f"results_{time.strftime('%d_%b_%Y_%H_%M_%S', time.localtime())}.h5"
logging.info("Evaluating each experiment")
for experiment in experiments:
results_key = experiment.models_dir.split('/')[-1]
results_filename = f"results_{results_key}.h5"
logging.info(f"Evaluating models in experiment: {experiment.desc}")
logging.info(f"Writing to: {results_filename}")
eval_env = load_dataset(experiment.dataset)
# need hyper parameters to do this, but have to load models first
has_computed_scene_graph = False
for model_path in glob(f"{experiment.models_dir}/*"):
model_key = '/'.join(model_path.split('/')[-2:])
ts = getattr(experiment, 'ts', 20)
eval_stg, hyp = _load_model(model_path, eval_env, ts=ts)
if not has_computed_scene_graph:
prefix = f"Preparing Node Graph: "
for scene in tqdm(eval_env.scenes, desc=prefix, dynamic_ncols=True, leave=True):
scene.calculate_scene_graph(eval_env.attention_radius,
hyp['edge_addition_filter'], hyp['edge_removal_filter'])
has_computed_scene_graph = True
logging.info(f"Evaluating: {model_key}")
BoN_results_key = '/'.join([experiment.dataset.name] + model_path.split('/')[-2:] + ['BoN'])
with pd.HDFStore(results_filename, 'a') as s:
for ph in PREDICTION_HORIZONS:
batch_metrics = evaluate_BoN(eval_env, ph, eval_stg, hyp)
                    df = pd.DataFrame(batch_metrics)
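                    # Assumed completion -- the excerpt is truncated here; presumably the
                    # per-horizon metrics are persisted into the open HDFStore, e.g.:
                    s.put(f"{BoN_results_key}/ph_{ph}", df)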
import pandas as pd
import numpy as np
import glob
import os, os.path as osp
DIFFSDIR1 = '../../data/dfdc/diffs1/'
DIFFSDIR2 = '../../data/dfdc/diffs2/'
reals = glob.glob(osp.join(DIFFSDIR1, 'reals/*.png'))
reals.extend(glob.glob(osp.join(DIFFSDIR2, 'reals/*.png')))
fakes = glob.glob(osp.join(DIFFSDIR1, 'fakes/*.png'))
fakes.extend(glob.glob(osp.join(DIFFSDIR2, 'fakes/*.png')))
masks = glob.glob(osp.join(DIFFSDIR1, 'masks/*.npy'))
masks.extend(glob.glob(osp.join(DIFFSDIR2, 'masks/*.npy')))
df = pd.read_csv('../../data/dfdc/train.csv')
# Data Science with SQL Server Quick Start Guide
# Chapter 03
# This is a comment
print("Hello World!")
# This line is ignored - it is a comment again
print('Another string.')
print('O"Brien') # In-line comment
print("O'Brien")
# Simple expressions
3 + 2
print("The result of 5 + 30 / 6 is:", 5 + 30 / 6)
10 * 3 - 6
11 % 4
print("Is 8 less or equal to 5?", 8 <= 5)
print("Is 8 greater than 5?", 8 > 5)
# Integer
a = 3
b = 4
a ** b
# Float
c = 6.0
d = float(7)
print(c, d)
# Formatted strings
# Variables in print()
e = "repeat"
f = 5
print("Let's %s string formatting %d times." % (e, f))
# String.format()
four_par = "String {} {} {} {}"
print(four_par.format(1, 2, 3, 4))
print(four_par.format('a', 'b', 'c', 'd'))
# More strings
print("""Three double quotes
are needed to delimit strings in multiple lines.
You can have as many lines as you wish.""")
a = "I am 5'11\" tall"
b = 'I am 5\'11" tall'
print("\t" + a + "\n\t" + b)
# Functions
def nopar():
print("No parameters")
def add(a, b):
return a + b
# Call without arguments
nopar()
# Call with variables and math
a = 10
b = 20
add(a / 5, b / 4)
# if..elif..else
a = 10
b = 20
c = 30
if a > b:
print("a > b")
elif a > c:
print("a > c")
elif (b < c):
print("b < c")
if a < c:
print("a < c")
if b in range(10, 30):
print("b is between a and c")
else:
print("a is less than b and less than c")
# List and loops
animals = ["bee", "whale", "cow"]
nums = []
for animal in animals:
print("Animal: ", animal)
for i in range(2, 5):
nums.append(i)
print(nums)
i = 1
while i <= 3:
print(i)
i = i + 1
# Dictionary
CtyCou = {
"Paris": "France",
"Tokyo": "Japan",
"Lagos": "Nigeria"}
for city, country in CtyCou.items():
print("{0} is in {1}.".format(city, country))
# Demo graphics
# Imports
import numpy as np
import pandas as pd
import pyodbc
import matplotlib.pyplot as plt
# Reading the data from SQL Server
con = pyodbc.connect('DSN=AWDW;UID=RUser;PWD=<PASSWORD>')
query = """SELECT CustomerKey,
Age, YearlyIncome,
CommuteDistance, BikeBuyer
FROM dbo.vTargetMail;"""
TM = pd.read_sql(query, con)
# Info about the data
TM.head(5)
TM.shape
# Define CommuteDistance as categorical
TM['CommuteDistance'] = TM['CommuteDistance'].astype('category')
# Reordering CommuteDistance categories
TM['CommuteDistance'].cat.reorder_categories(
["0-1 Miles",
"1-2 Miles","2-5 Miles",
"5-10 Miles", "10+ Miles"], inplace=True)
# Crosstabulation
cdbb = pd.crosstab(TM.CommuteDistance, TM.BikeBuyer)
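# A possible way to inspect the crosstab visually (not part of the original excerpt;
# relies on the matplotlib import above)
cdbb.plot(kind='bar', stacked=True)
plt.show()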
# -------------------------------------------------- ML 02/10/2019 ----------------------------------------------------#
#
# This is the class for poisson process
#
# -------------------------------------------------------------------------------------------------------------------- #
import numpy as np
import pandas as pd
import math
from handles.data_hand import get_slotted_data
from sklearn.linear_model import LinearRegression
from scipy.stats import kstest
import statsmodels.api as sm
import statsmodels.formula.api as smf
from modeling.stat.models import fit_neg_binom
from scipy.stats import expon,gamma,nbinom
import random
random.seed( 30 )
class poisson_process:
def __init__(self,events,x,slotmin=60,sesonality=24.00,x_meta=None,combine=None,variablity_lambda=True):
        # x holds the numeric features lambda depends on.
        # x_meta holds the categorical features that lambda depends on.
        # Seasonality is the period at which the time series wraps around, e.g. 24 hours.
        # x can contain any factor levels joined with "_"; each category
        # must be encoded by a numeric indicator.
self.x_names = np.array( x.columns )
self.ts = np.array(events)
self.x = np.array(x)
self.x_meta=x_meta
self.slotmin = slotmin
self.sesonality = float( sesonality )
self.processed_data = self.get_combined_ts_data(combine=combine)
self.def_scale_multiplier()
self._variablity_lambda = variablity_lambda
def combine_timeslots(self,x,combine):
p = x.copy()
p[np.in1d(x, combine)] = combine[0]
return p
def poles_fun(self,d):
return pd.DataFrame(d).apply(lambda x: 1/(x**3))
def def_scale_multiplier(self):
        # this is based on empirical data
average_mat = pd.DataFrame({'2014':[0.237053898,0.23033784,0.22646637,0.224855127,0.22145071,0.22017719,0.219680942],
'2015':[0.190591233,0.185363899,0.183113651,0.180825924,0.179276851,0.179478113,0.17919847]}).T
average_mat.columns = [1000,1100,1200,1300,1400,1500,1600]
average_mat=average_mat.reset_index()
average_mat=average_mat.melt(id_vars=["index"],var_name="Poles",value_name="Value")
cols = ['year','poles','scale']
average_mat.columns = cols
average_mat[cols] = average_mat[cols].apply(pd.to_numeric, errors='coerce')
average_mat['poles']=self.poles_fun(average_mat['poles'])
regressor = LinearRegression()
regressor.fit(average_mat[['year','poles']], average_mat['scale'])
self.scale_multiplier_predictor = regressor
self.reset_scale_multiplier()
def reset_scale_multiplier(self):
self._scale_multiplier = 1
def avg_scale_pred(self,year,poles):
return self.scale_multiplier_predictor.predict(np.array([year,
np.array(self.poles_fun([poles]))]).reshape(1, -1))
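    # Illustrative call: avg_scale_pred(year=2015, poles=1200) returns the linear
    # regression's predicted scale multiplier as a length-1 ndarray.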
def get_processed_data(self):
diff_col_name = 'Aarrival_diff'
delta_t = np.diff(self.ts, n=1).reshape(-1, 1)
fin_d = pd.DataFrame(np.concatenate((delta_t, self.x[:-1, :]), axis=1))
fin_d.columns = np.concatenate(
(np.array(diff_col_name).reshape(-1, 1), np.array(self.x_names).reshape(-1, 1)), axis=0).flatten()
fin_d[diff_col_name] = pd.to_numeric(fin_d[diff_col_name])
# split the values in the factor that was provided to us
split = fin_d[self.x_names[0]].str.split("_", -1)
n = []
for i in range(0, len(split[0])):
fin_d['f' + str(i)] = split.str.get(i)#.astype(float) # update this if code breaks
n.append('f' + str(i))
n.append(self.x_names[1])
self.all_names = n
fin_d = fin_d.sort_values(by=n)
return fin_d
def get_combined_ts_data(self,combine):
# combine timeslots
# if given argument = combine -- array of time slots to combine. we will replace these with
# the first element of the combine array
# start time internal is the timeslots to model the data on
self.processed_data = self.get_processed_data()
self.combine = combine
if combine is None:
self.combined_slots = False
combined_timeslots = self.processed_data[self.x_names[1]]
else:
self.combined_slots = True
combined_timeslots = self.combine_timeslots(self.processed_data[self.x_names[1]], combine=combine)
self.processed_data['Start_time_internal'] = combined_timeslots
return self.processed_data
def get_slotted_data(self,data, slot_secs):
return get_slotted_data(data=data,slot_secs=slot_secs)
# ------------------------------------------- FITTING --------------------------------------------------------------
def daywise_training_data(self,d,combine,fac1,fac2,f1,days,orignal_start_slot):
        # fac2 is our internal slots that are combined
# it is also worth noting that we calculate the average for combined slots and then put them for
# all the slots for that given duration
if self.combined_slots:
x = fac2[(fac1 == f1)]
day = days[(fac1 == f1)]
model_d = []
for day_i in np.unique(day):
model_d_temp = []
for t_i in np.unique(x):
try:
model_d_temp.append([[t_i, expon.fit(pd.to_numeric(d[(x == t_i) & (day == day_i)]))[1], day_i]])
except:
continue
model_d_temp = np.vstack(model_d_temp)
scale_val = model_d_temp[(model_d_temp[:, 0] == combine[0])].flatten()[1]
add = [[i, scale_val, day_i] for i in combine[1:]]
model_d_temp = np.concatenate((model_d_temp, add))
model_d.append(model_d_temp)
model_d = np.vstack(model_d)
else:
x = orignal_start_slot[(fac1 == f1)]
day = days[(fac1 == f1)]
model_d = []
for day_i in np.unique(day):
model_d_temp = []
for t_i in np.unique(x):
try:
model_d_temp.append([[t_i, expon.fit(pd.to_numeric(d[(x == t_i) & (day == day_i)]))[1], day_i]])
except:
continue
model_d_temp = np.vstack(model_d_temp)
model_d.append(model_d_temp)
model_d = np.vstack(model_d)
return model_d
def discreet_fit_model(self,data,x):
data_gamma = pd.DataFrame({'days':data, 'arrivalslot':x,'indicator':1})
data_gamma = data_gamma.groupby(['days','arrivalslot']).agg(['count']).reset_index()
data_gamma.columns = ['days', 'arrivalslot','count']
data_save = data_gamma['count']
x_save = data_gamma['arrivalslot']
ks_t_D = pd.DataFrame()
ks_t_pval = pd.DataFrame()
t_t_pval = pd.DataFrame()
exp_loc = pd.DataFrame()
exp_scale = pd.DataFrame()
exp_shape = pd.DataFrame()
time_slot = pd.DataFrame()
pos_l = pd.DataFrame()
neg_bio_r = pd.DataFrame()
neg_bio_p = pd.DataFrame()
for f2 in np.unique(data_gamma['arrivalslot']):
d = pd.to_numeric( data_gamma[data_gamma['arrivalslot'] == f2]['count'] )
# poission
lam = np.mean(d)
# gamma
alpha,loc, beta = gamma.fit(d,loc=0)
# ks test
D , kspval = kstest(d,'gamma', args=(alpha,loc,beta))
# ttest - one sided
# sample2 = gamma.rvs(a = alpha, loc=loc, scale=beta, size=d.shape[0])
val , pval = 0,0 #ttest_ind(d,sample2)
# neg_binom
r,p = fit_neg_binom(vec=np.array(d).flatten(),init=0.0000001)
# if we have combined data then add same model to all combined timeslots
if self.combined_slots and f2 == self.combine[0]:
for var in self.combine:
pos_l = pos_l.append(pd.DataFrame([lam]))
exp_loc = exp_loc.append(pd.DataFrame([loc]))
exp_shape = exp_shape.append(pd.DataFrame([alpha]))
exp_scale = exp_scale.append(pd.DataFrame([beta]))
neg_bio_r = neg_bio_r.append(pd.DataFrame([r]))
neg_bio_p = neg_bio_p.append(pd.DataFrame([p]))
ks_t_D = ks_t_D.append(pd.DataFrame([D]))
ks_t_pval = ks_t_pval.append(pd.DataFrame([kspval]))
t_t_pval = t_t_pval.append(pd.DataFrame([pval / 2]))
# add timeslot
time_slot = time_slot.append([var])
else:
pos_l = pos_l.append(pd.DataFrame([lam]))
exp_loc = exp_loc.append(pd.DataFrame([loc]))
exp_shape = exp_shape.append(pd.DataFrame([alpha]))
exp_scale = exp_scale.append(pd.DataFrame([beta]))
neg_bio_r = neg_bio_r.append(pd.DataFrame([r]))
neg_bio_p = neg_bio_p.append(pd.DataFrame([p]))
ks_t_D = ks_t_D.append(pd.DataFrame([D]))
ks_t_pval = ks_t_pval.append(pd.DataFrame([kspval]))
t_t_pval = t_t_pval.append(pd.DataFrame([pval / 2]))
# add timeslot
time_slot = time_slot.append([f2])
# this is the final fit
fit = pd.DataFrame()
fit[[self.x_names[1]]] = time_slot
fit['gamma_loc'] = np.array(exp_loc).flatten()
fit['gamma_scale'] = np.array(exp_scale).flatten()
fit['gamma_shape'] = np.array(exp_shape).flatten()
fit['KS_D'] = np.array(ks_t_D).flatten()
fit['KS_PVal'] = np.array(ks_t_pval).flatten()
fit['Ttest_PVal'] = np.array(t_t_pval).flatten()
fit['Poisson_lam'] = np.array(pos_l).flatten()
fit['Negbio_r'] = np.array(neg_bio_r).flatten()
fit['Negbio_p'] = np.array(neg_bio_p).flatten()
return fit,data_save,x_save
def neg_bio_reg_fit_model(self,data,x):
data_gamma = pd.DataFrame({'days': data, 'arrivalslot': x, 'indicator': 1})
data_gamma = data_gamma.groupby(['days', 'arrivalslot']).agg(['count']).reset_index()
data_gamma.columns = ['days', 'arrivalslot', 'count']
data_save = data_gamma['count']
x_save = data_gamma['arrivalslot']
nb_mu = pd.DataFrame()
nb_p = pd.DataFrame()
nb_n = pd.DataFrame()
nb_alpha = pd.DataFrame()
time_slot = pd.DataFrame()
# data_gamma.to_csv("aaaaaaaaaaaaaaaaaa.csv")
for f2 in np.unique(data_gamma['arrivalslot']):
d = pd.to_numeric(data_gamma[data_gamma['arrivalslot'] == f2]['count'])
X_train = np.ones(len(d))
try:
df_train = pd.DataFrame({'counts':d,'Intercept':X_train})
# Calculating alpha = shape parameter
# theta = (1/alpha) = r = number of sucess
# Using the statsmodels GLM class, train the Poisson regression model on the training data set
poisson_training_results = sm.GLM(d, X_train, family=sm.families.Poisson()).fit()
df_train['BB_LAMBDA'] = poisson_training_results.mu
df_train['AUX_OLS_DEP'] = df_train.apply(
lambda x: ((x['counts'] - x['BB_LAMBDA']) ** 2 - x['counts']) / x['BB_LAMBDA'], axis=1)
ols_expr = """AUX_OLS_DEP ~ BB_LAMBDA - 1"""
aux_olsr_results = smf.ols(ols_expr, df_train).fit()
alpha = aux_olsr_results.params[0]
# introducing a minimum liimit on alpha
# -- putting alpha = 0.00001 trigggers poisson distribution
if alpha <= 0:
alpha = 0.00001 # ---> this will trigger poisson while prediciton
# alpha = 0.25 # this just introductes a min limit on alpha
# # use this when we dont want to use calculated alpha
# alpha = 0.2
# calculating the mean parameter mu
nb2_training_results = sm.GLM(d.astype(float), X_train.astype(float),
family=sm.families.NegativeBinomial(alpha = alpha)).fit()
mean = float( np.exp(nb2_training_results.params) )# float(np.mean(d))
# calculate n and p
n = float(1/alpha)
var = mean + 1 / n * mean ** 2
p = float(1-((var - mean) / var))
# var = mean + (np.power(mean,2)*alpha)
# n = float((np.power(mean,2))/ (var - mean))
# p = float((var - mean)/var)
except:
n,p,mean,alpha = -1,-1,-1,-1
# if we have combined data then add same model to all combined timeslots
if self.combined_slots and f2 == self.combine[0]:
for var in self.combine:
nb_mu = nb_mu.append(pd.DataFrame([mean]))
nb_p = nb_p.append(pd.DataFrame([p]))
nb_n = nb_n.append(pd.DataFrame([n]))
nb_alpha = nb_alpha.append(pd.DataFrame([alpha]))
time_slot = time_slot.append([var])
else:
nb_mu = nb_mu.append(pd.DataFrame([mean]))
nb_p = nb_p.append(pd.DataFrame([p]))
nb_n = nb_n.append(pd.DataFrame([n]))
nb_alpha = nb_alpha.append(pd.DataFrame([alpha]))
# add timeslot
time_slot = time_slot.append([f2])
# this is the final fit
fit = pd.DataFrame()
fit[[self.x_names[1]]] = time_slot
fit['nb_n'] = np.array(nb_n).flatten()
fit['nb_p'] = np.array(nb_p).flatten()
fit['nb_mu'] = np.array(nb_mu).flatten()
fit['nb_alpha'] = np.array(nb_alpha).flatten()
return fit,data_save,x_save
def fit(self,lambda_mod='expon_fit',combine=np.arange(1,7),year=2015,poles=1677,verbose=1):
        # ------------------------------ Preparation ------------------------------------------------
        # create dataset for modeling
        # if continuous = True, we replace the values of means with continuous values
self.lambda_mod = lambda_mod
        # create the scale multiplier for this function
self._fit_year = int(year)
self._fit_poles = int(poles)
self._scale_multiplier_fit = self.avg_scale_pred(year=self._fit_year,poles=self._fit_poles)
        # this is done because the polynomial model can start with a negative value
        self._scale_old = pd.Series(self._scale_multiplier_fit)
import os
import random
import sys
import joblib
import math
import lightgbm as lgb
import xgboost as xgb
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sklearn.svm as svm
from sklearn.ensemble import RandomForestClassifier, VotingClassifier
from sklearn.linear_model import LinearRegression, LogisticRegression
from catboost import CatBoostClassifier
from sklearn.metrics import (accuracy_score, average_precision_score,
classification_report, confusion_matrix, f1_score,
precision_recall_curve, roc_auc_score, roc_curve)
from sklearn.model_selection import GroupKFold
from sklearn.naive_bayes import GaussianNB
from keras.layers import LSTM
from keras.layers import Dense
from keras.models import Sequential
from keras.utils import to_categorical
import keras.callbacks as kcallbacks
from utilClass import RocAucMetricCallback
from utils import series_to_supervised
from tqdm import tqdm
from imblearn.over_sampling import SMOTE
from imblearn.under_sampling import RandomUnderSampler
from imblearn.pipeline import Pipeline
sys.path.append('..')
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
class AtoA:
def __init__(self, mode, type='df', seed=2021, scale='all'):
self.seed = seed
        self.window = 0  # sliding-window length used when reading data online
        self.win_df = pd.DataFrame()  # rolling dataframe kept at the maximum window length during online reading
        self.mode = mode  # 'offline' or 'online'
        self.type = type  # 'df' or 'dcs'
        self.scale = scale  # model scale
        self.current_row = None  # sample at the current timestep
        self.pre_row = None  # sample at the previous timestep
    # Dogfight (DF) feature-engineering utilities
    def FE_DF(self, data):
        """DF feature engineering.
        Args:
            data (dataframe): raw data
        Returns:
            DataFrame: data after feature engineering
        """
data = data.sort_values(by=['id'])
if self.scale == 'all':
            # Enemy velocity: diff() of x/y/z against the previous timestep, divided by the sampling interval
for f in ['x', 'y', 'z']:
data['enemy_v_{}'.format(f)] = data.groupby('id')[
'enemy_{}'.format(f)].diff(1) / 0.02
            # Acceleration of both aircraft: diff() of v_x/v_y/v_z against the previous timestep, divided by the sampling interval
for f in ['v_x', 'v_y', 'v_z']:
data[f'my_{f}_acc'] = data.groupby(
'id')[f'my_{f}'].diff() / 0.2
data[f'enemy_{f}_acc'] = data.groupby(
'id')[f'enemy_{f}'].diff() / 0.2
            # Pairwise differences between my and enemy position/velocity components
for f in ['x', 'y', 'z', 'v_x', 'v_y', 'v_z']:
data[f'{f}_me_minus'] = data[f'my_{f}'] - data[f'enemy_{f}']
        # Distance between the two aircraft
data['distance'] = ((data['my_x'] - data['enemy_x'])**2 +
(data['my_y'] - data['enemy_y'])**2 +
(data['my_z'] - data['enemy_z'])**2)**0.5
        # Aiming angle: dot product of my velocity and the line of sight to the enemy
data['cos'] = ((data['my_v_x'] * (data['enemy_x'] - data['my_x'])) +
(data['my_v_y'] * (data['enemy_y'] - data['my_y'])) +
(data['my_v_z'] * (data['enemy_z'] - data['my_z'])))
        # Magnitude of my velocity
data['speedAll'] = ((data['my_v_x']**2 + data['my_v_y']**2 +
data['my_v_z']**2)**0.5)
        # Cosine of the aiming angle
data['cosValue'] = data['cos'] / (data['speedAll'] * data['distance'])
        # Fill missing values with 0
data.fillna(0, inplace=True)
return data
    def online_FE_DF(self, row_dict):
        """Online DF feature engineering.
        Args:
            row_dict(dict): a single record passed in as a dict
        Returns:
            DataFrame: the record as a dataframe with the added features
        """
        # Convert the dict to a dataframe
data = pd.DataFrame(row_dict, index=[0])
        # Distance between the two aircraft
data['distance'] = ((data['my_x'] - data['enemy_x'])**2 +
(data['my_y'] - data['enemy_y'])**2 +
(data['my_z'] - data['enemy_z'])**2)**0.5
        # Aiming angle: dot product of my velocity and the line of sight to the enemy
data['cos'] = ((data['my_v_x'] * (data['enemy_x'] - data['my_x'])) +
(data['my_v_y'] * (data['enemy_y'] - data['my_y'])) +
(data['my_v_z'] * (data['enemy_z'] - data['my_z'])))
        # Magnitude of my velocity
data['speedAll'] = ((data['my_v_x']**2 + data['my_v_y']**2 +
data['my_v_z']**2)**0.5)
        # Cosine of the aiming angle
data['cosValue'] = data['cos'] / (data['speedAll'] * data['distance'])
        # Fill missing values with 0
data.fillna(0, inplace=True)
return data
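    # Illustrative use (keys are assumed to match the my_*/enemy_* fields referenced above):
    #   fe = AtoA(mode='online', type='df')
    #   row_features = fe.online_FE_DF({'my_x': 0.0, 'my_y': 0.0, 'my_z': 0.0,
    #                                   'my_v_x': 1.0, 'my_v_y': 0.0, 'my_v_z': 0.0,
    #                                   'enemy_x': 100.0, 'enemy_y': 0.0, 'enemy_z': 0.0})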
    # DCS feature-engineering utilities
    def FE_DCS(self, data_):
        """DCS feature engineering.
        Args:
            data (dataframe): raw data
        Returns:
            DataFrame: data after feature engineering
        """
data = data_.copy(deep=True)
if self.mode == 'offline':
            # For offline training, sort the data by id
data = data.sort_values(by=['id'])
        # Distance between the two aircraft
data['distance'] = (
(data['my_position_x'] - data['enemy_position_x'])**2 +
(data['my_position_y'] - data['enemy_position_y'])**2 +
(data['my_position_z'] - data['enemy_position_z'])**2)**0.5
        # Vector math: a = (x, y, z) is my velocity vector, b = (x2, y2, z2) my position and
        # c = (x3, y3, z3) the enemy position; the vector from my centre to the enemy centre is
        # d = c - b, and the angle between d and a satisfies cos = (d . a) / (|d| * |a|)
data['cos'] = ((data['my_speed_x'] *
(data['enemy_position_x'] - data['my_position_x'])) +
(data['my_speed_y'] *
(data['enemy_position_y'] - data['my_position_y'])) +
(data['my_speed_z'] *
(data['enemy_position_z'] - data['my_position_z'])))
        # Magnitude of my velocity
data['speedAll'] = ((data['my_speed_x']**2 + data['my_speed_y']**2 +
data['my_speed_z']**2)**0.5)
        # Cosine of the angle between the vectors
data['cosValue'] = data['cos'] / (data['speedAll'] * data['distance'])
        # Pairwise position differences between my aircraft and the enemy
for f in ['position_x', 'position_y', 'position_z']:
data[f'{f}_diff'] = data[f'my_{f}'] - data[f'enemy_{f}']
return data
@staticmethod
def _caculate_speed_connect_cos(x, y, z, enemy_x, enemy_y, enemy_z,
speed_x, speed_y, speed_z):
"""
        Angle between the me-to-enemy line-of-sight vector and a velocity vector.
        Args:
            x, y, z: my coordinates
            enemy_x, enemy_y, enemy_z: enemy coordinates
            speed_x, speed_y, speed_z: my (or the enemy's) velocity
        Returns:
            speed_connect_cos: cosine of the angle between the line-of-sight vector and the velocity vector
"""
connect_vec = np.array([enemy_x - x, enemy_y - y, enemy_z - z])
my_speed_vec = np.array([speed_x, speed_y, speed_z])
speed_connect_cos = connect_vec.dot(my_speed_vec) / np.sqrt(
connect_vec.dot(connect_vec) * my_speed_vec.dot(my_speed_vec))
return speed_connect_cos
@staticmethod
def _caculate_speed_cos(speed_x, speed_y, speed_z, enemy_speed_x,
enemy_speed_y, enemy_speed_z):
"""
        Angle between my velocity vector and the enemy's velocity vector.
        Args:
            speed_x, speed_y, speed_z: my velocity
            enemy_speed_x, enemy_speed_y, enemy_speed_z: enemy velocity
        Returns:
            speed_cos: cosine of the angle between the two velocity vectors
"""
my_speed_vec = np.array([speed_x, speed_y, speed_z])
enemy_speed_vec = np.array(
[enemy_speed_x, enemy_speed_y, enemy_speed_z])
speed_cos = my_speed_vec.dot(enemy_speed_vec) / np.sqrt(
my_speed_vec.dot(my_speed_vec) *
enemy_speed_vec.dot(enemy_speed_vec))
return speed_cos
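    # Both cosine helpers return a value in [-1, 1]; e.g. AtoA._caculate_speed_cos(1, 0, 0, 2, 0, 0)
    # evaluates to 1.0 for parallel velocity vectors (illustrative sanity check).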
    def FE_DCS_new(self, data_):
        """Feature engineering for the new DCS task.
        Args:
            data_ (dataframe): raw data
        Returns:
            data: data after feature engineering
        """
data = data_.copy()
data = data.sort_values(by=['id', 'ISO time'])
data.reset_index(drop=True, inplace=True)
data.rename(columns={
'U': 'x',
'V': 'z',
'Altitude': 'y',
'enemy_U': 'enemy_x',
'enemy_V': 'enemy_z',
'enemy_Altitude': 'enemy_y',
},
inplace=True)
if self.mode == 'offline':
if self.scale == 'all':
                # Compute my velocity
data = pd.concat([
data,
pd.DataFrame({
'speed_x': data.groupby('id')['x'].diff(),
'speed_y': data.groupby('id')['y'].diff(),
'speed_z': data.groupby('id')['z'].diff()
})
],
sort=False,
axis=1)
data.fillna(0, inplace=True)
data[['speed_x', 'speed_y', 'speed_z'
]] = data[['speed_x', 'speed_y', 'speed_z']] / 0.05
data['speed'] = data.apply(lambda x: np.sqrt(x[
'speed_x']**2 + x['speed_y']**2 + x['speed_z']**2),
axis=1)
                # Compute enemy velocity
data = pd.concat([
data,
pd.DataFrame({
'enemy_speed_x':
data.groupby('id')['enemy_x'].diff(),
'enemy_speed_y':
data.groupby('id')['enemy_y'].diff(),
'enemy_speed_z':
data.groupby('id')['enemy_z'].diff()
})
],
sort=False,
axis=1)
data.fillna(0, inplace=True)
data[[
'enemy_speed_x', 'enemy_speed_y', 'enemy_speed_z'
]] = data[['enemy_speed_x', 'enemy_speed_y', 'enemy_speed_z'
]] / 0.05
data['enemy_speed'] = data.apply(
lambda x: np.sqrt(x['enemy_speed_x']**2 + x[
'enemy_speed_y']**2 + x['enemy_speed_z']**2),
axis=1)
                # Distance between the two aircraft
data['distance'] = data.apply(lambda x: np.sqrt(
(x['x'] - x['enemy_x'])**2 + (x['y'] - x['enemy_y'])**2 +
(x['z'] - x['enemy_z'])**2),
axis=1)
                # Cosine of the angle between my velocity and the me-to-enemy line of sight
data['speed_connect_cos'] = data.apply(
lambda x: self._caculate_speed_connect_cos(
x['x'], x['y'], x['z'], x['enemy_x'], x['enemy_y'], x[
'enemy_z'], x['speed_x'], x['speed_y'], x['speed_z'
]),
axis=1)
                # Cosine of the angle between the enemy velocity and the me-to-enemy line of sight
data['enemy_speed_connect_cos'] = data.apply(
lambda x: self._caculate_speed_connect_cos(
x['x'], x['y'], x['z'], x['enemy_x'], x['enemy_y'], x[
'enemy_z'], x['enemy_speed_x'], x['enemy_speed_y'],
x['enemy_speed_z']),
axis=1)
                # Cosine of the angle between the two velocity vectors
data['speed_cos'] = data.apply(
lambda x: self._caculate_speed_cos(x['speed_x'], x[
'speed_y'], x['speed_z'], x['enemy_speed_x'], x[
'enemy_speed_y'], x['enemy_speed_z']),
axis=1)
                # Normalize my heading to [0, 360)
data['Heading'] = data['Heading'] % 360
                # Relative position and velocity
for f in [
'x', 'y', 'z', 'speed_x', 'speed_y', 'speed_z', 'speed'
]:
data[f'relative_{f}'] = data[f'enemy_{f}'] - data[f'{f}']
                # Lead-pursuit indicator
data['is_lead_chase'] = data.apply(
lambda x: self._is_lead_chase(
x['x'], x['y'], x['z'], x['enemy_x'], x['enemy_y'],
x['enemy_z'], x['speed_x'], x['speed_y'], x['speed_z'],
x['enemy_speed_x'], x['enemy_speed_y'], x[
'enemy_speed_z'], x['speed_connect_cos'], x[
'enemy_speed_connect_cos']),
axis=1)
                # Zero out fire labels that are infeasible (distance > 1000 or facing away)
data['label'] = data.apply(
lambda x: 0 if x['speed_connect_cos'] < 0 or x[
'distance'] > 1000 else x['label'],
axis=1)
data.fillna(0, inplace=True)
data.dropna(inplace=True)
data.to_csv('a2a_fe.csv', index=False)
return data
elif self.scale == 'light':
                # Compute my velocity
data = pd.concat([
data,
pd.DataFrame({
'speed_x': data.groupby('id')['x'].diff(),
'speed_y': data.groupby('id')['y'].diff(),
'speed_z': data.groupby('id')['z'].diff()
})
],
sort=False,
axis=1)
data.fillna(0, inplace=True)
data[['speed_x', 'speed_y', 'speed_z'
]] = data[['speed_x', 'speed_y', 'speed_z']] / 0.05
data['speed'] = data.apply(lambda x: np.sqrt(x[
'speed_x']**2 + x['speed_y']**2 + x['speed_z']**2),
axis=1)
                # Compute enemy velocity
data = pd.concat([
data,
pd.DataFrame({
'enemy_speed_x':
data.groupby('id')['enemy_x'].diff(),
'enemy_speed_y':
data.groupby('id')['enemy_y'].diff(),
'enemy_speed_z':
data.groupby('id')['enemy_z'].diff()
})
],
sort=False,
axis=1)
data.fillna(0, inplace=True)
data[[
'enemy_speed_x', 'enemy_speed_y', 'enemy_speed_z'
]] = data[['enemy_speed_x', 'enemy_speed_y', 'enemy_speed_z'
]] / 0.05
data['enemy_speed'] = data.apply(
lambda x: np.sqrt(x['enemy_speed_x']**2 + x[
'enemy_speed_y']**2 + x['enemy_speed_z']**2),
axis=1)
                # Distance between the two aircraft
data['distance'] = data.apply(lambda x: np.sqrt(
(x['x'] - x['enemy_x'])**2 + (x['y'] - x['enemy_y'])**2 +
(x['z'] - x['enemy_z'])**2),
axis=1)
                # Cosine of the angle between my velocity and the me-to-enemy line of sight
data['speed_connect_cos'] = data.apply(
lambda x: self._caculate_speed_connect_cos(
x['x'], x['y'], x['z'], x['enemy_x'], x['enemy_y'], x[
'enemy_z'], x['speed_x'], x['speed_y'], x['speed_z'
]),
axis=1)
                # Cosine of the angle between the enemy velocity and the me-to-enemy line of sight
data['enemy_speed_connect_cos'] = data.apply(
lambda x: self._caculate_speed_connect_cos(
x['x'], x['y'], x['z'], x['enemy_x'], x['enemy_y'], x[
'enemy_z'], x['enemy_speed_x'], x['enemy_speed_y'],
x['enemy_speed_z']),
axis=1)
                # Cosine of the angle between the two velocity vectors
data['speed_cos'] = data.apply(
lambda x: self._caculate_speed_cos(x['speed_x'], x[
'speed_y'], x['speed_z'], x['enemy_speed_x'], x[
'enemy_speed_y'], x['enemy_speed_z']),
axis=1)
                # Relative altitude and speed
                for f in ['y', 'speed']:
data[f'relative_{f}'] = data[f'enemy_{f}'] - data[f'{f}']
                # Lead-pursuit indicator
data['is_lead_chase'] = data.apply(
lambda x: self._is_lead_chase(
x['x'], x['y'], x['z'], x['enemy_x'], x['enemy_y'],
x['enemy_z'], x['speed_x'], x['speed_y'], x['speed_z'],
x['enemy_speed_x'], x['enemy_speed_y'], x[
'enemy_speed_z'], x['speed_connect_cos'], x[
'enemy_speed_connect_cos']),
axis=1)
                # Zero out fire labels that are infeasible (distance > 1000 or facing away)
data['label'] = data.apply(
lambda x: 0 if x['speed_connect_cos'] < 0 or x[
'distance'] > 1000 else x['label'],
axis=1)
data.fillna(0, inplace=True)
data.dropna(inplace=True)
data.to_csv('a2a_fe.csv', index=False)
return data
if self.mode == 'online':
if self.scale == 'all':
                # Compute my velocity
data = pd.concat([
data,
pd.DataFrame({
'speed_x': data['x'].diff(),
'speed_y': data['y'].diff(),
'speed_z': data['z'].diff()
})
],
sort=False,
axis=1)
data.fillna(0, inplace=True)
data[['speed_x', 'speed_y', 'speed_z'
]] = data[['speed_x', 'speed_y', 'speed_z']] / 0.05
data['speed'] = data.apply(lambda x: np.sqrt(x[
'speed_x']**2 + x['speed_y']**2 + x['speed_z']**2),
axis=1)
                # Compute enemy velocity
data = pd.concat([
data,
pd.DataFrame({
'enemy_speed_x': data['enemy_x'].diff(),
'enemy_speed_y': data['enemy_y'].diff(),
'enemy_speed_z': data['enemy_z'].diff()
})
],
sort=False,
axis=1)
data.fillna(0, inplace=True)
data[[
'enemy_speed_x', 'enemy_speed_y', 'enemy_speed_z'
]] = data[['enemy_speed_x', 'enemy_speed_y', 'enemy_speed_z'
]] / 0.05
data['enemy_speed'] = data.apply(
lambda x: np.sqrt(x['enemy_speed_x']**2 + x[
'enemy_speed_y']**2 + x['enemy_speed_z']**2),
axis=1)
                # Distance between the two aircraft
data['distance'] = data.apply(lambda x: np.sqrt(
(x['x'] - x['enemy_x'])**2 + (x['y'] - x['enemy_y'])**2 +
(x['z'] - x['enemy_z'])**2),
axis=1)
                # Cosine of the angle between my velocity and the me-to-enemy line of sight
data['speed_connect_cos'] = data.apply(
lambda x: self._caculate_speed_connect_cos(
x['x'], x['y'], x['z'], x['enemy_x'], x['enemy_y'], x[
'enemy_z'], x['speed_x'], x['speed_y'], x['speed_z'
]),
axis=1)
                # Cosine of the angle between the enemy velocity and the me-to-enemy line of sight
data['enemy_speed_connect_cos'] = data.apply(
lambda x: self._caculate_speed_connect_cos(
x['x'], x['y'], x['z'], x['enemy_x'], x['enemy_y'], x[
'enemy_z'], x['enemy_speed_x'], x['enemy_speed_y'],
x['enemy_speed_z']),
axis=1)
                # Cosine of the angle between the two velocity vectors
data['speed_cos'] = data.apply(
lambda x: self._caculate_speed_cos(x['speed_x'], x[
'speed_y'], x['speed_z'], x['enemy_speed_x'], x[
'enemy_speed_y'], x['enemy_speed_z']),
axis=1)
                # Normalize my heading to [0, 360)
data['Heading'] = data['Heading'] % 360
                # Relative position and velocity
for f in [
'x', 'y', 'z', 'speed_x', 'speed_y', 'speed_z', 'speed'
]:
data[f'relative_{f}'] = data[f'enemy_{f}'] - data[f'{f}']
                # Zero out fire labels that are infeasible (distance > 1000 or facing away)
data['label'] = data.apply(
lambda x: 0 if x['speed_connect_cos'] < 0 or x[
'distance'] > 1000 else x['label'],
axis=1)
data.fillna(0, inplace=True)
data.dropna(inplace=True)
data.to_csv('a2a_fe.csv', index=False)
return data
elif self.scale == 'light':
                # Compute my velocity
data = pd.concat([
data,
pd.DataFrame({
'speed_x': data['x'].diff(),
'speed_y': data['y'].diff(),
'speed_z': data['z'].diff()
})
],
sort=False,
axis=1)
data.fillna(0, inplace=True)
data[['speed_x', 'speed_y', 'speed_z'
]] = data[['speed_x', 'speed_y', 'speed_z']] / 0.05
data['speed'] = data.apply(lambda x: np.sqrt(x[
'speed_x']**2 + x['speed_y']**2 + x['speed_z']**2),
axis=1)
                # Compute enemy velocity
data = pd.concat([
data,
pd.DataFrame({
'enemy_speed_x': data['enemy_x'].diff(),
'enemy_speed_y': data['enemy_y'].diff(),
'enemy_speed_z': data['enemy_z'].diff()
})
],
sort=False,
axis=1)
data.fillna(0, inplace=True)
data[[
'enemy_speed_x', 'enemy_speed_y', 'enemy_speed_z'
]] = data[['enemy_speed_x', 'enemy_speed_y', 'enemy_speed_z'
]] / 0.05
data['enemy_speed'] = data.apply(
lambda x: np.sqrt(x['enemy_speed_x']**2 + x[
'enemy_speed_y']**2 + x['enemy_speed_z']**2),
axis=1)
                # Distance between the two aircraft
data['distance'] = data.apply(lambda x: np.sqrt(
(x['x'] - x['enemy_x'])**2 + (x['y'] - x['enemy_y'])**2 +
(x['z'] - x['enemy_z'])**2),
axis=1)
                # Cosine of the angle between my velocity and the me-to-enemy line of sight
data['speed_connect_cos'] = data.apply(
lambda x: self._caculate_speed_connect_cos(
x['x'], x['y'], x['z'], x['enemy_x'], x['enemy_y'], x[
'enemy_z'], x['speed_x'], x['speed_y'], x['speed_z'
]),
axis=1)
                # Cosine of the angle between the enemy velocity and the me-to-enemy line of sight
data['enemy_speed_connect_cos'] = data.apply(
lambda x: self._caculate_speed_connect_cos(
x['x'], x['y'], x['z'], x['enemy_x'], x['enemy_y'], x[
'enemy_z'], x['enemy_speed_x'], x['enemy_speed_y'],
x['enemy_speed_z']),
axis=1)
                # Cosine of the angle between the two velocity vectors
data['speed_cos'] = data.apply(
lambda x: self._caculate_speed_cos(x['speed_x'], x[
'speed_y'], x['speed_z'], x['enemy_speed_x'], x[
'enemy_speed_y'], x['enemy_speed_z']),
axis=1)
                # Relative altitude and speed
for f in ['y', 'speed']:
data[f'relative_{f}'] = data[f'enemy_{f}'] - data[f'{f}']
                # Zero out fire labels that are infeasible (distance > 1000 or facing away)
data['label'] = data.apply(
lambda x: 0 if x['speed_connect_cos'] < 0 or x[
'distance'] > 1000 else x['label'],
axis=1)
data.fillna(0, inplace=True)
data.dropna(inplace=True)
data.to_csv('a2a_fe.csv', index=False)
return data
    # Online DCS feature engineering
    def online_FE_DCS(self, row_dict):
        """Online DCS feature engineering.
        Args:
            row_dict(dict): a single record passed in as a dict
        Returns:
            DataFrame: the record as a dataframe with the added features
        """
        # Convert the dict to a dataframe
row = pd.DataFrame(row_dict, index=[0])
        # Reuse the offline feature-engineering function
FE_row = self.FE_DCS(row)
return FE_row
    # Online DCS feature engineering (stateful version that keeps the previous row)
    def online_FE_DCS_new(self, row_dict):
        """Online AtoA feature engineering.
        Args:
            row_dict(dict): a single record passed in as a dict
        Returns:
            DataFrame: the record as a dataframe with the added features
        """
row = pd.DataFrame(row_dict, index=[0])
self.current_row = row
if self.pre_row is None:
FE_row = self.online_FE_DCS(self.current_row)
else:
window = pd.concat([self.pre_row, self.current_row], axis=0)
FE_row = self.online_FE_DCS(window)[-1:]
self.pre_row = self.current_row
return FE_row
    def train_val_split(self, df_train, percent=0.8):
        """Dataset split.
        Splits the dataset into a training set and a validation set.
        Args:
            df_train(dataframe): raw data
            percent(int): split ratio
        Returns:
            train(dataframe): training set
            val_data(dataframe): validation set
        """
        # Collect all ids
all_ids = df_train['id'].values.tolist()
        # De-duplicate the ids
all_ids = list(set(all_ids))
        # set() ordering is not deterministic, so sort first to keep results reproducible
all_ids.sort()
        # random.seed only applies once, so reset it before every random.sample call
random.seed(self.seed)
        # Sample the training-set ids
train_ids = random.sample(all_ids, int(len(all_ids) * percent))
        # Remaining ids form the validation set
val_ids = list(set(all_ids) - set(train_ids))
        # Select training rows by id
train_data = df_train[df_train['id'].isin(train_ids)]
        # Select validation rows by id
val_data = df_train[df_train['id'].isin(val_ids)]
        # The data are sequential, but we model single samples, so shuffle the training rows
train_data = train_data.sample(
frac=1, random_state=self.seed).reset_index(drop=True)
return train_data, val_data
def smote(self, data_):
data = data_.copy()
over = SMOTE(sampling_strategy=0.2, random_state=self.seed)
under = RandomUnderSampler(sampling_strategy=1.0,
random_state=self.seed)
steps = [('o', over), ('u', under)]
pipeline = Pipeline(steps=steps)
X, y = pipeline.fit_resample(
data[[i for i in data.columns if i not in ['label']]],
data['label'])
return pd.concat([X, y], axis=1)
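    # The resampling pipeline above first SMOTE-oversamples the minority class up to a
    # 0.2 minority:majority ratio, then randomly undersamples the majority class to 1:1.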
    def _feature_name(self):
        """Columns to keep for modelling.
        Returns:
            feature_names(list): list of column names
        """
        # Keep a fixed column order, otherwise model prediction breaks
if self.type == 'df':
if self.scale == 'all':
feature_names = [
'my_x', 'my_y', 'my_z', 'my_v_x', 'my_v_y', 'my_v_z',
'my_rot_x', 'my_rot_y', 'my_rot_z', 'enemy_x', 'enemy_y',
'enemy_z', 'enemy_v_x', 'enemy_v_y', 'enemy_v_z',
'my_v_x_acc', 'enemy_v_x_acc', 'my_v_y_acc',
'enemy_v_y_acc', 'my_v_z_acc', 'enemy_v_z_acc',
'x_me_minus', 'y_me_minus', 'z_me_minus', 'v_x_me_minus',
'v_y_me_minus', 'v_z_me_minus', 'distance', 'cos',
'speedAll', 'cosValue'
]
else:
feature_names = ['cosValue', 'speedAll', 'distance']
elif self.type == 'dcs':
if self.scale == 'all':
feature_names = [
'z', 'Roll', 'Pitch', 'Yaw', 'x', 'y', 'Heading',
'enemy_z', 'enemy_x', 'enemy_y', 'speed_x', 'speed_y',
'speed_z', 'enemy_speed_x', 'enemy_speed_y',
'enemy_speed_z', 'distance', 'speed', 'speed_connect_cos',
'enemy_speed_connect_cos', 'relative_x', 'relative_z',
'relative_y', 'relative_speed_x', 'relative_speed_y',
'relative_speed_z', 'relative_speed', 'speed_cos'
]
elif self.scale == 'light':
feature_names = [
'distance', 'speed_connect_cos', 'enemy_speed_connect_cos',
'relative_y', 'speed_cos'
]
else:
feature_names = [
'z', 'Roll', 'Pitch', 'Yaw', 'x', 'y', 'Heading',
'enemy_z', 'enemy_x', 'enemy_y'
]
return feature_names
    # Hold-out split
    def _hold_out(self, raw_train, percent_train):
        """Training data for the hold-out method.
        Args:
            raw_train(dataframe): raw data
            percent_train(int): training-set proportion
        Returns:
            train(dataframe): training set
            val(dataframe): validation set
        """
        # Columns to keep
feature_names = self._feature_name()
        # Split into training and validation sets
train_data, val_data = self.train_val_split(raw_train,
percent=percent_train)
if self.type == 'dcs':
train_data = self.smote(train_data)
        # Features and labels for training and validation
X_train, X_val, y_train, y_val = train_data[feature_names], val_data[
feature_names], train_data['label'], val_data['label']
return X_train, X_val, y_train, y_val
    # k-fold cross-validation data
    def _k_fold(self, raw_train, k):
        """Cross-validation data.
        Args:
            raw_train(dataframe): raw data
            k(int): number of folds
        Returns:
            train(dataframe): k-fold cross-validation training set
            val(dataframe): validation set
        """
        # Columns to keep
feature_names = self._feature_name()
        # Group by id
groups = list(raw_train['id'])
        # Grouped cross-validation
gkf = GroupKFold(n_splits=k)
data_list = []
        # Build each cross-validation fold
for train_index, val_index in gkf.split(raw_train[feature_names],
raw_train['label'],
groups=groups):
            # Slice each fold by index
X_train, y_train, X_val, y_val = raw_train.iloc[train_index][feature_names], \
raw_train.iloc[train_index]['label'], \
raw_train.iloc[val_index][feature_names], \
raw_train.iloc[val_index]['label']
            # Store the fold
data_list.append([X_train, X_val, y_train, y_val])
        # Return the list of folds
return data_list
    def _bootstrap(self, raw_train):
        """Bootstrap data.
        Args:
            raw_train(dataframe): raw data
        Returns:
            train(dataframe): bootstrap training set
            val(dataframe): validation set
        """
        # Columns to keep
feature_names = self._feature_name()
        # Collect all ids and de-duplicate
ids = pd.DataFrame(set(raw_train['id']), columns=['id'], index=None)
random.seed(self.seed)
        # Bootstrap-sample ids with replacement
train_group_ids = ids.sample(frac=1.0,
replace=True,
random_state=self.seed)
        # Ids not drawn for training form the validation set
val_group_ids = ids.loc[ids.index.difference(
train_group_ids.index)].copy()
        # Create the two dataframes
        train_data = pd.DataFrame()
# -*- coding: utf-8 -*-
"""
This function creates kymographs from a stack of images.
By <NAME> 2020
"""
from skimage import io, measure
import matplotlib.pyplot as plt
import numpy as np
from cell_segmentation import cell_seg_no_cell_crop
import statistics
from matplotlib import gridspec
import pandas as pd
import os
def kymo_generator(image, fname, save_data, interval, pixel_size, bit_depth, small_obj = 1000, save_destination = os.path.dirname(__file__)):
"""
This function takes an image, generates four kymographs, and analyze them.
Parameters
----------
image : array
An input image.
fname : string
The filename.
save_data : boolean
Whether to save the data.
interval : integer
The interval at which images were acquired (e.g. every 5 seconds)
pixel_size : integer
The pixel size of the image.
bit_depth : integer
The bit depth of the image.
small_obj : integer, optional
The smallest object allowed. The default is 1000 pixels.
save_destination : string, optional
The saving directory. The default is os.path.dirname(__file__).
Returns
-------
A confirmation note "done".
"""
all_cell_masks, all_cell_props = cell_seg_no_cell_crop(image, filename = fname, DEPTH = bit_depth, small_obj = small_obj,
show_img = False, save_contour = False)
y, x = all_cell_props[0][-1].centroid
y = int(y)
x = int(x)
kymo_1 = np.empty((y+1,all_cell_masks[0].shape[0]))
kymo_2 = np.empty((all_cell_masks[0].shape[1]-y,all_cell_masks[0].shape[0]))
kymo_3 = np.empty((x+1,all_cell_masks[0].shape[0]))
kymo_4 = np.empty((all_cell_masks[0].shape[2]-x,all_cell_masks[0].shape[0]))
width = 3
all_kymos = []
for slice_number in range (all_cell_masks[0].shape[0]):
profile_line_1 = measure.profile_line(all_cell_masks[0][slice_number, :, :], src=(y, x), dst=(0, x), linewidth=width, mode='constant')
kymo_1[:,slice_number] = np.flip(profile_line_1, axis=0)
profile_line_2 = measure.profile_line(all_cell_masks[0][slice_number, :, :], src=(y, x), dst=(all_cell_masks[0][slice_number, :, :].shape[0]-1, x), linewidth=width, mode='constant')
kymo_2[:,slice_number] = np.flip(profile_line_2, axis=0)
profile_line_3 = measure.profile_line(all_cell_masks[0][slice_number, :, :], src=(y, x), dst=(y, 0), linewidth=width, mode='constant')
kymo_3[:,slice_number] = np.flip(profile_line_3, axis=0)
profile_line_4 = measure.profile_line(all_cell_masks[0][slice_number, :, :], src=(y, x), dst=(y, all_cell_masks[0][slice_number, :, :].shape[1]-1), linewidth=width, mode='constant')
kymo_4[:,slice_number] = np.flip(profile_line_4, axis=0)
all_kymos.append(kymo_1)
all_kymos.append(kymo_2)
all_kymos.append(kymo_3)
all_kymos.append(kymo_4)
del kymo_1, kymo_2, kymo_3, kymo_4 # to save memory
from kymo_to_coords import kymo_to_coords
all_normalized_coords = []
all_filtered_coords = []
for n in range(len(all_kymos)):
normalized, filtered_coords = kymo_to_coords(all_kymos[n], thres=15, pixel_length = 0.1833333)
all_normalized_coords.append(normalized)
all_filtered_coords.append(filtered_coords)
################################dividing line###########################################
from measure_protrusions import measure_protrusions
all_plateau_idx = []
all_minimas = []
all_retraction_rate = []
all_avg_speed = []
all_lowest_point_idx = []
print(fname + ' results')
print('----------------------------------------')
for n in range(len(all_normalized_coords)):
lowest_point_idx, plateau_idx, minima, retraction_rate, avg_speed = measure_protrusions(normalized_coords = all_normalized_coords[n], frame_rate = interval)
all_plateau_idx.append(plateau_idx)
all_minimas.append(minima)
all_retraction_rate.append(retraction_rate)
all_avg_speed.append(avg_speed)
all_lowest_point_idx.append(lowest_point_idx)
all_avg_speed_avg = statistics.mean(all_avg_speed)
all_avg_speed_stdev = statistics.stdev(all_avg_speed)
all_retraction_rate_avg = statistics.mean(all_retraction_rate)
all_retraction_rate_stdev = statistics.stdev(all_retraction_rate)
print('----------------------------------------')
print('Average retraction rate of all kymos = ' + str(round(all_retraction_rate_avg, 3))+ ' ± ' + str(round(all_retraction_rate_stdev,2)))
print('Average protrusion speed of all kymos = ' + str(round(all_avg_speed_avg, 2))+ ' ± ' + str(round(all_avg_speed_stdev,2)))
################################dividing line###########################################
color_1 = '#003f5c'
color_2 = '#7a5195'
color_3 = '#ef5675'
color_4 = '#ffa600'
fig = plt.figure(figsize=(20, 10)) # 20 in x and 10 in y
    gs = gridspec.GridSpec(2, 4) # 2 rows x 4 columns
axes0 = plt.subplot(gs[:,0:2])
axes0.imshow(image[-1,:,:], cmap='Greys')
axes0.plot([x, x], [y, 0], color_1, [x, x], [y, all_cell_masks[0][slice_number, :, :].shape[0]-1], color_2,
[x, 0], [y, y], color_3, [x, all_cell_masks[0][slice_number, :, :].shape[1]-1], [y, y], color_4, linewidth = width, linestyle='dashed')
axes0.axis('off')
###################
axes1 = plt.subplot(gs[0,2])
x_axis = np.linspace(start = 0, stop = int((len(all_normalized_coords[0])-1)*interval), num = len(all_normalized_coords[0]))
axes1.plot(x_axis, all_normalized_coords[0], 'k')
last_slope_point_0 = x_axis[all_plateau_idx[0]]
axes1.plot([x_axis[all_lowest_point_idx[0]], last_slope_point_0], [all_normalized_coords[0][all_lowest_point_idx[0]],
all_normalized_coords[0][all_plateau_idx[0]]], color_1, linewidth = width/2, linestyle='dashed', label='Protrusion speed')
# plot retraction points
axes1.scatter(all_minimas[0]*interval, [all_normalized_coords[0][n] for n in all_minimas[0]], s=20, c='r', label='Retraction')
axes1.legend(loc="lower right")
for spine in axes1.spines.values():
spine.set_edgecolor(color_1)
spine.set_linewidth(3)
axes1.set_ylabel('Distance (µm)')
axes1.set_ylim(top = int(np.max(all_normalized_coords)+2)) #limit y axis to be the maximum of all the numbers
###################
axes2 = plt.subplot(gs[0,3], sharex=axes1, sharey=axes1)
x_axis = np.linspace(start = 0, stop = int((len(all_normalized_coords[1])-1)*interval), num = len(all_normalized_coords[1]))
axes2.plot(x_axis, all_normalized_coords[1], 'k')
last_slope_point_1 = x_axis[all_plateau_idx[1]]
axes2.plot([x_axis[all_lowest_point_idx[1]], last_slope_point_1], [all_normalized_coords[1][all_lowest_point_idx[1]],
all_normalized_coords[1][all_plateau_idx[1]]], color_2, linewidth = width/2, linestyle='dashed', label='Protrusion speed')
axes2.scatter(all_minimas[1]*interval, [all_normalized_coords[1][n] for n in all_minimas[1]], s=20, c='r', label='Retraction')
axes2.legend(loc="lower right")
for spine in axes2.spines.values():
spine.set_edgecolor(color_2)
spine.set_linewidth(3)
###################
axes3 = plt.subplot(gs[1,2], sharex=axes1, sharey=axes1)
x_axis = np.linspace(start = 0, stop = int((len(all_normalized_coords[2])-1)*interval), num = len(all_normalized_coords[2]))
axes3.plot(x_axis, all_normalized_coords[2], 'k')
last_slope_point_2 = x_axis[all_plateau_idx[2]]
axes3.plot([x_axis[all_lowest_point_idx[2]], last_slope_point_2], [all_normalized_coords[2][all_lowest_point_idx[2]],
all_normalized_coords[2][all_plateau_idx[2]]], color_3, linewidth = width/2, linestyle='dashed', label='Protrusion speed')
axes3.scatter(all_minimas[2]*interval, [all_normalized_coords[2][n] for n in all_minimas[2]], s=20, c='r', label='Retraction')
axes3.legend(loc="lower right")
for spine in axes3.spines.values():
spine.set_edgecolor(color_3)
spine.set_linewidth(3)
axes3.set_xlabel('Time (s)')
axes3.set_ylabel('Distance (µm)')
###################
axes4 = plt.subplot(gs[1,3], sharex=axes1, sharey=axes1)
x_axis = np.linspace(start = 0, stop = int((len(all_normalized_coords[3])-1)*interval), num = len(all_normalized_coords[3]))
axes4.plot(x_axis, all_normalized_coords[3], 'k')
last_slope_point_3 = x_axis[all_plateau_idx[3]]
axes4.plot([x_axis[all_lowest_point_idx[3]], last_slope_point_3], [all_normalized_coords[3][all_lowest_point_idx[3]],
all_normalized_coords[3][all_plateau_idx[3]]], color_4, linewidth = width/2, linestyle='dashed', label='Protrusion speed')
axes4.scatter(all_minimas[3]*interval, [all_normalized_coords[3][n] for n in all_minimas[3]], s=20, c='r', label='Retraction')
axes4.legend(loc="lower right")
for spine in axes4.spines.values():
spine.set_edgecolor(color_4)
spine.set_linewidth(3)
axes4.set_xlabel('Time (s)')
plt.show()
################################dividing line###########################################
if save_data:
df = pd.DataFrame()
df[fname + ' Kymo_1'] = pd.Series(all_normalized_coords[0])
df[fname + ' Kymo_1' + ' retraction pts'] = pd.Series(all_minimas[0]*interval)
df[fname + ' Kymo_1' + ' plateau idx'] = pd.Series(all_plateau_idx[0])
df[fname + ' Kymo_1' + ' protrusion beginning idx'] = pd.Series(all_lowest_point_idx[0])
df[fname + ' Kymo_2'] = pd.Series(all_normalized_coords[1])
df[fname + ' Kymo_2' + ' retraction pts'] = | pd.Series(all_minimas[1]*interval) | pandas.Series |
import unittest
class TestLearn(unittest.TestCase):
def test_ewm_covariance_vs_pandas(self):
import numpy as np
import pandas as pd
from pyonline.covariance import Covariance
span = 3
random_state = np.random.RandomState(3)
X = random_state.normal(.1, .324, (100, 10))
candidate = Covariance(span=span).fit(X).covariance_
true = | pd.DataFrame(X) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
@file
@brief Various functions to download data about population
"""
import gzip
import os
import numpy
import pandas
from pyquickhelper.loghelper import noLOG
from pyensae.datasource import download_data
from .data_exceptions import DataFormatException
def population_france_year(url="https://www.insee.fr/fr/statistiques/fichier/1892086/pop-totale-france.xls",
sheet_name=0, year=2020):
"""
Downloads the data for the French population from INSEE website
@param url url
@param sheet_name sheet index
@param year last year to find
@return DataFrame
    The sheet index is 0 for the whole of France, 1 for metropolitan France.
    The last row aggregates multiple ages (``1914 ou avant``); these remain
    aggregated but the label is changed to 1914. ``100 ou plus`` is replaced by 100.
By default, the data is coming from `INSEE, Bilan Démographique <https://www.insee.fr/fr/statistiques/1892086?sommaire=1912926>`_.
**2017/01**: pandas does not seem to be able to read the format (old format).
    You should convert the file to txt with Excel.
"""
try:
df = pandas.read_excel(url, sheet_name=sheet_name)
skiprows = 5
except Exception as e: # pragma: no cover
# we try to find a local version
this = os.path.dirname(__file__)
name = os.path.join(this, "data_population", url.split(
"/")[-1].replace(".xls", ".xlsx"))
if not os.path.exists(name):
raise FileNotFoundError(
"Unable to find a replacement for '{0}' as '{1}'".format(url, name)) from e
df = pandas.read_excel(name, sheet_name=sheet_name)
url = name
skiprows = 0
col = df.columns[0]
if len(col) == 0:
raise DataFormatException( # pragma: no cover
"Unable to find {0} (year) in table at url '{1}'".format(year, url))
if skiprows > 0 and str(year) not in col:
raise DataFormatException( # pragma: no cover
"Unable to find {0} (year) in first column name '{1}' at url "
"'{2}'".format(year, col, url))
table = pandas.read_excel(url, sheet_name=sheet_name, skiprows=skiprows)
table.columns = ["naissance", "age", "hommes", "femmes", "ensemble"]
table = table[(table.naissance != 'Champ : France y c. Mayotte.') &
table.naissance.apply(lambda s: "Source" not in str(s))].copy()
table["naissance"] = table.apply(lambda r: r["naissance"] if isinstance(r["naissance"], (int, float)) else
r["naissance"].replace(" ou avant", ""), axis=1)
table["age"] = table.apply(lambda r: r["age"] if isinstance(r["age"], (int, float)) else
r["age"].replace(" ou plus", "") if isinstance(
r["age"], str) else r["age"],
axis=1)
table = table.dropna(axis=0)
for c in table.columns:
table[c] = table[c].astype(int)
return table
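# Illustrative sketch (not from the original module; the default URL and year come from the
# signature above): fetching the table and summing its columns gives the total population
# per gender. The column names follow the renaming done inside the function.
#
#     pop = population_france_year(year=2020)
#     totals = pop[["hommes", "femmes", "ensemble"]].sum()
#     print(totals)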
def table_mortalite_france_00_02(homme=None, femme=None):
"""
    Downloads mortality tables for France, assuming they
are available in Excel format.
@param homme table for men
@param femme table for women
@return DataFrame
The final DataFrame merges both sheets.
The data is coming from
`Institut des Actuaires: Reférences de mortalité <http://www.institutdesactuaires.com/gene/main.php?base=2127>`_ or
`Références techniques <http://www.ressources-actuarielles.net/EXT/ISFA/fp-isfa.nsf/
34a14c286dfb0903c1256ffd00502d73/d62719e329025b94c12577c100545bb7?OpenDocument>`_.
"""
this = os.path.join(os.path.abspath(
os.path.dirname(__file__)), "data_population")
if homme is None:
homme = os.path.join(this, "TH00-02_D.xls")
sheeth = "Table"
else:
sheeth = 0
if femme is None:
femme = os.path.join(this, "TF00-02_D.xls")
sheetf = "Table"
else:
sheetf = 0
isexch = os.path.splitext(homme)[-1] in (".xls", ".xlsx")
dfh = pandas.read_excel(
homme, sheet_name=sheeth) if isexch else pandas.read_csv(homme, sep=";")
if dfh.shape[1] > 2:
dfh = dfh[dfh.columns[:2]]
isexcf = os.path.splitext(femme)[-1] in (".xls", ".xlsx")
dff = pandas.read_excel(
femme, sheet_name=sheetf) if isexcf else pandas.read_csv(femme, sep=";")
if dff.shape[1] > 2:
dff = dff[dff.columns[:2]]
df = dfh.merge(dff, on="Age")
df.columns = ["Age", "Homme", "Femme"]
return df.dropna().reset_index(drop=True)
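# Illustrative sketch (assumption, not part of the original module): the merged table
# indexes mortality values by age, so both genders can be plotted directly.
#
#     mortalite = table_mortalite_france_00_02()
#     mortalite.plot(x="Age", y=["Homme", "Femme"], logy=True)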
def fecondite_france(url=None):
"""
download fecondity table for France (Excel format)
@param url source (url or file)
@return DataFrame
By default, the data is coming from a local file
which is a copy of
`INSEE: Fécondité selon l'âge détaillé de la mère <https://www.insee.fr/fr/statistiques/2045366?sommaire=2045470&q=fecondite>`_.
The original file cannot be read by pandas so we convert it first.
See also `INSEE Bilan Démographique 2016 <https://www.insee.fr/fr/statistiques/1892259?sommaire=1912926>`_.
"""
if url is None:
this = os.path.abspath(os.path.dirname(__file__))
url = os.path.join(this, "data_population", "irsocsd2014_G10.xlsx")
df = pandas.read_excel(url)
col = df.columns[0]
df[col] = df.apply(lambda r: r[col] if isinstance(r[col], int) else
r[col].replace(" ou plus", "").replace(" ans", "").replace(" (p)", "") if isinstance(
r[col], str) else r[col],
axis=1)
df = df[df[col].apply(lambda x: "0" <= x[0] <= "9" if isinstance(
x, str) else (isinstance(x, (int, float))))].copy()
df[col] = df[col].astype(float)
cp = df[df[col] == 15]
if len(cp) == 0:
ages = [str(_) for _ in set(df[col])] # pragma: no cover
raise DataFormatException( # pragma: no cover
"Unable to find 15 (age) in table at url: {0}\n{1}".format(
url, "\n".join(ages)))
if len(cp) != 1:
raise DataFormatException( # pragma: no cover
"too many values 15 in table at url: " + url)
cpe = df[df[col] == 50]
if len(cpe) == 0:
raise DataFormatException( # pragma: no cover
"Unable to find 50 (age) in table at url: " + url)
if len(cpe) != 1:
raise DataFormatException( # pragma: no cover
"Too many values 50 in table at url: " + url)
ind = cp.index[0]
ind2 = cpe.index[0]
table = df.iloc[ind:ind2, :3].copy()
table.columns = ["age", "2005", "2015"]
for c in table.columns:
table[c] = table[c].astype(float)
return table
def table_mortalite_euro_stat(url="http://ec.europa.eu/eurostat/estat-navtree-portlet-prod/BulkDownloadListing?file=data/",
name="demo_mlifetable.tsv.gz", final_name="mortalite.txt",
whereTo=".", stop_at=None, fLOG=noLOG):
"""
This function retrieves mortality table from `EuroStat <http://ec.europa.eu/eurostat/fr>`_ through
`table de mortalité <http://www.data-publica.com/opendata/7098--population-et-conditions-sociales-table-de-mortalite-de-1960-a-2010>`_
(*this link is currently broken, data-publica does not provide such a database anymore, a copy is provided*).
@param url data source
@param name data table name
@param final_name the data is compressed, it needs to be uncompressed
into a file, this parameter defines its name
@param whereTo data needs to be downloaded, location of this place
@param stop_at the overall process is quite long, if not None,
it only keeps the first rows
@param fLOG logging function
@return data_frame
The function checks the file final_name exists.
If it is the case, the data is not downloaded twice.
The header contains a weird format as coordinates are separated by a comma::
indic_de,sex,age,geo\time 2013 2012 2011 2010 2009
We need to preprocess the data to split this information into columns.
The overall process takes 4-5 minutes, 10 seconds to download (< 10 Mb),
4-5 minutes to preprocess the data (it could be improved). The processed data
contains the following columns::
['annee', 'valeur', 'age', 'age_num', 'indicateur', 'genre', 'pays']
Columns *age* and *age_num* look alike. *age_num* is numeric and is equal
    to *age* except when *age_num* is 85. Everybody above that age falls
into the same category. The table contains many indicators:
    * PROBSURV: probability of survival between two exact ages (px)
    * LIFEXP: life expectancy at the exact age (ex)
    * SURVIVORS: number of survivors at the exact age (lx)
    * PYLIVED: number of person-years lived between two exact ages (Lx)
    * DEATHRATE: mortality rate at age x (Mx)
    * PROBDEATH: probability of death between two exact ages (qx)
    * TOTPYLIVED: total number of person-years lived after the exact age (Tx)
"""
if os.path.exists(final_name) and os.stat(final_name).st_size > 1e7:
return final_name
temp = final_name + ".remove.txt"
if not os.path.exists(temp) or os.stat(temp).st_size < 1e7:
local = download_data(name, url=url, whereTo=whereTo)
local = local[0] + ".gz"
with gzip.open(local, 'rb') as f:
file_content = f.read()
content = str(file_content, encoding="utf8")
with open(temp, "w", encoding="utf8") as f:
f.write(content)
def format_age(s):
if s.startswith("Y_"):
if s.startswith("Y_LT"): # pragma: no cover
return "YLT" + s[4:]
if s.startswith("Y_GE"): # pragma: no cover
return "YGE" + s[4:]
raise SyntaxError(s) # pragma: no cover
i = int(s.strip("Y"))
return "Y%02d" % i
def format_age_num(s):
if s.startswith("Y_"):
if s.startswith("Y_LT"): # pragma: no cover
return float(s.replace("Y_LT", ""))
if s.startswith("Y_GE"): # pragma: no cover
return float(s.replace("Y_GE", ""))
raise SyntaxError(s) # pragma: no cover
i = int(s.strip("Y"))
return float(i)
def format_value(s):
if s.strip() == ":":
return numpy.nan
return float(s.strip(" ebp"))
fLOG("step 0, reading")
dff = | pandas.read_csv(temp, sep="\t", encoding="utf8") | pandas.read_csv |
# python 3
import matplotlib
# matplotlib.use('pgf')
# pgf_with_pdflatex = {
# "pgf.texsystem": "pdflatex",
# "pgf.preamble": [
# r"\usepackage[utf8x]{inputenc}",
# r"\usepackage[T1]{fontenc}",
# r"\usepackage{cmbright}",
# ]
# }
# matplotlib.rcParams.update(pgf_with_pdflatex)
import pandas
import re
from matplotlib2tikz import save as tikz_save
import numpy
from matplotlib import pyplot
matplotlib.style.use('ggplot')
pyplot.interactive(False)
def to_min_secs(x, pos):
x = int(x)
minutes = x // 60
seconds = x % 60
return '{:02d}:{:02d}'.format(minutes, seconds)
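# Quick worked example (illustrative): to_min_secs(125, None) -> '02:05'. The second
# argument is the tick position required by matplotlib's FuncFormatter and is unused here.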
def get_speed_stats(speed_data_path):
df = pandas.read_csv(speed_data_path, sep=',', thousands=',')
try:
node_nr = re.search('ProvidenciaExampleScenario.(.+?).veinsmobility.speed', df.columns[1]).group(1)
except AttributeError:
node_nr = '??' # apply your error handling
df.columns = ['time', 'speed']
mean = df['speed'].mean()
std = df['speed'].std()
return (node_nr, mean, std)
def build_dataframe_case(case):
# mobility data
mobility_columns = ['module', 'max_speed', 'min_speed', 'start_time', 'stop_time',
'total_co2', 'total_dist', 'total_time']
case_df_mobility = pandas.read_csv(case + '_stats_veinsmobility.csv')
case_df_mobility.columns = mobility_columns
mobility_search_re = 'ProvidenciaExampleScenario.(.+?).veinsmobility'
case_df_mobility['module'] = case_df_mobility['module'].map(lambda x: re.search(mobility_search_re, x).group(1))
case_df_mobility.set_index(['module'], inplace=True)
# appl data (sent warnings, arrived at dest)
appl_columns = ['module', 'arrived', 'rcvd_warnings', 'sent_warnings']
case_df_appl = pandas.read_csv(case + '_stats_appl.csv')
case_df_appl.columns = appl_columns
appl_search_re = 'ProvidenciaExampleScenario.(.+?).appl'
case_df_appl['module'] = case_df_appl['module'].map(lambda x: re.search(appl_search_re, x).group(1))
case_df_appl['arrived'] = case_df_appl['arrived'].map({1: True, 0: False})
case_df_appl.set_index(['module'], inplace=True)
case_df_speed = pandas.DataFrame()
case_df_speed['mean_speed'] = case_df_mobility['total_dist'] / case_df_mobility['total_time']
# join all tables
case_df = pandas.merge(case_df_mobility, case_df_appl, left_index=True, right_index=True, how='outer')
case_df = pandas.merge(case_df, case_df_speed, left_index=True, right_index=True, how='outer')
return case_df
def buid_csv():
for case in ['per0.0', 'per1.0', 'base_case', 'per0.5', 'per0.75', 'per0.25']:
df = build_dataframe_case(case)
df.to_csv(case + '_total_stats.csv')
def analysis_arrived_vhc():
per00 = pandas.read_csv('per0.0_total_stats.csv').set_index(['module'])
per10 = pandas.read_csv('per1.0_total_stats.csv').set_index(['module'])
base = pandas.read_csv('base_case_total_stats.csv').set_index(['module'])
per05 = pandas.read_csv('per0.5_total_stats.csv').set_index(['module'])
per075 = pandas.read_csv('per0.75_total_stats.csv').set_index(['module'])
per025 = pandas.read_csv('per0.25_total_stats.csv').set_index(['module'])
base_arrived_cnt = base['arrived'].sum()
per00_arrived_cnt = per00['arrived'].sum()
per10_arrived_cnt = per10['arrived'].sum()
per05_arrived_cnt = per05['arrived'].sum()
per075_arrived_cnt = per075['arrived'].sum()
per025_arrived_cnt = per025['arrived'].sum()
objects = ('Caso Base', 'PER 0.0', 'PER 0.25', 'PER 0.75', 'PER 1.0')
#objects = ('Caso Base', 'PER 0.0', 'PER 1.0')
x_ax = numpy.arange(len(objects))
#bars = [base_arrived_cnt, per00_arrived_cnt, per025_arrived_cnt,
# per05_arrived_cnt, per075_arrived_cnt, per10_arrived_cnt]
bars = [base_arrived_cnt, per00_arrived_cnt, per025_arrived_cnt, per075_arrived_cnt, per10_arrived_cnt]
pyplot.bar(x_ax, bars)
#pyplot.yscale('log')
pyplot.yticks(bars)
pyplot.xticks(x_ax, objects)
for a, b in zip(x_ax, bars):
pyplot.text(a, b, str(b))
#pyplot.ylabel('N° de vehículos que alcanzaron su destino')
pyplot.title('N° de vehículos que alcanzaron su destino (escala log)')
pyplot.show()
def analysis_speed():
per00 = pandas.read_csv('per0.0_total_stats.csv').set_index(['module'])
per10 = pandas.read_csv('per1.0_total_stats.csv').set_index(['module'])
base = pandas.read_csv('base_case_total_stats.csv').set_index(['module'])
per05 = pandas.read_csv('per0.5_total_stats.csv').set_index(['module'])
per075 = pandas.read_csv('per0.75_total_stats.csv').set_index(['module'])
per025 = pandas.read_csv('per0.25_total_stats.csv').set_index(['module'])
y = [base.loc[base['arrived'] == False]['mean_speed'].mean(),
per00.loc[per00['arrived'] == False]['mean_speed'].mean(),
per025.loc[per025['arrived'] == False]['mean_speed'].mean(),
per05.loc[per05['arrived'] == False]['mean_speed'].mean(),
per075.loc[per075['arrived'] == False]['mean_speed'].mean(),
per10.loc[per10['arrived'] == False]['mean_speed'].mean()]
objects = ('Caso Base', 'PER 0.0', 'PER 0.25', 'PER 0.5', 'PER 0.75', 'PER 1.0')
x = numpy.arange(len(objects))
pyplot.bar(x, y)
pyplot.yscale('log')
#pyplot.yticks(y)
pyplot.xticks(x, objects)
pyplot.ylabel('Velocidad m/s')
pyplot.title('Velocidades promedio de vehículos que NO alcanzaron su destino.')
for a, b in zip(x, y):
pyplot.text(a, b, str(b))
pyplot.show()
def analysis_distance():
per00 = pandas.read_csv('per0.0_total_stats.csv').set_index(['module'])
per10 = pandas.read_csv('per1.0_total_stats.csv').set_index(['module'])
base = pandas.read_csv('base_case_total_stats.csv').set_index(['module'])
per05 = pandas.read_csv('per0.5_total_stats.csv').set_index(['module'])
per075 = pandas.read_csv('per0.75_total_stats.csv').set_index(['module'])
per025 = pandas.read_csv('per0.25_total_stats.csv').set_index(['module'])
# filter
data = [base.loc[base['arrived'] != True]['total_dist'], per00.loc[per00['arrived'] != True]['total_dist'], per025.loc[per025['arrived'] != True]['total_dist'],
per05.loc[per05['arrived'] != True]['total_dist'], per075.loc[per075['arrived'] != True]['total_dist'], per10.loc[per10['arrived'] != True]['total_dist']]
labels = ['Caso Base', 'PER 0.0', 'PER 0.25',
'PER 0.5', 'PER 0.75', 'PER 1.0']
bins = numpy.linspace(0, base['total_dist'].max(), 50)
fig, axes = pyplot.subplots(nrows=2, ncols=3, sharey=True)
fig.suptitle("Frecuencia relativa de distancias recorridas - autos que NO llegaron a su destino.")
for idx, ax in enumerate(axes.ravel()):
x, y, _ = ax.hist(data[idx], bins, label=labels[idx], normed=True)
pyplot.setp(ax.get_yticklabels(), visible=True)
ax.legend(loc='upper right')
pyplot.show()
def analysis_time():
per00 = pandas.read_csv('per0.0_total_stats.csv').set_index(['module'])
per10 = pandas.read_csv('per1.0_total_stats.csv').set_index(['module'])
base = pandas.read_csv('base_case_total_stats.csv').set_index(['module'])
per05 = pandas.read_csv('per0.5_total_stats.csv').set_index(['module'])
per075 = pandas.read_csv('per0.75_total_stats.csv').set_index(['module'])
per025 = pandas.read_csv('per0.25_total_stats.csv').set_index(['module'])
# filter
data = [base.loc[base['arrived'] == False]['total_time'], per00.loc[per00['arrived'] == False]['total_time'],
per025.loc[per025['arrived'] == False]['total_time'],
per05.loc[per05['arrived'] == False]['total_time'], per075.loc[per075['arrived'] == False]['total_time'],
per10.loc[per10['arrived'] == False]['total_time']]
labels = ['Caso Base', 'PER 0.0', 'PER 0.25',
'PER 0.5', 'PER 0.75', 'PER 1.0']
    # bin range follows the plotted quantity (total_time)
    bins = numpy.linspace(0, base['total_time'].max(), 50)
fig, axes = pyplot.subplots(nrows=2, ncols=3)
for idx, ax in enumerate(axes.ravel()):
ax.hist(data[idx], bins, label=labels[idx], normed=True)
ax.legend(loc='upper right')
pyplot.show()
def per00_vs_per10_distancetime():
per00 = pandas.read_csv('per0.0_total_stats.csv').set_index(['module'])
per10 = pandas.read_csv('per1.0_total_stats.csv').set_index(['module'])
fig, ax = pyplot.subplots()
ax.set_facecolor('white')
ax.grid(color='#a1a1a1', linestyle='-', alpha=0.1)
ax.scatter(per00['total_dist'], per00['total_time'], marker='o', s=4, alpha=0.75, label='PER 0.0', color='#ff0000')
ax.scatter(per10['total_dist'], per10['total_time'], marker='o', s=4, alpha=0.75, label='PER 1.0', color='#33cc22')
ax.legend(loc='lower right')
formatter = matplotlib.ticker.FuncFormatter(to_min_secs)
ax.yaxis.set_major_formatter(formatter)
pyplot.xlabel('Distancia Total [m]')
#pyplot.ylabel('Tiempo Total [MM:SS]')
ax.set_ylabel('Tiempo Total [MM:SS]')
#pyplot.savefig('per00per10_timedistance.pgf')
tikz_save('per00per10_timedistance.tex',
figureheight='\\figureheight',
figurewidth='\\figurewidth')
pyplot.show()
def per00_vs_per10_co2distance():
per00 = pandas.read_csv('per0.0_total_stats.csv').set_index(['module'])
per10 = pandas.read_csv('per1.0_total_stats.csv').set_index(['module'])
fig, ax = pyplot.subplots()
ax.set_facecolor('white')
ax.grid(color='#a1a1a1', linestyle='-', alpha=0.1)
ax.scatter(per00['total_dist'], per00['total_co2'], marker='o', s=4, alpha=0.75, label='PER 0.0', color='#ff0000')
ax.scatter(per10['total_dist'], per10['total_co2'], marker='o', s=4, alpha=0.75, label='PER 1.0', color='#33cc22')
ax.legend(loc='lower right')
#formatter = matplotlib.ticker.FuncFormatter(to_min_secs)
#ax.yaxis.set_major_formatter(formatter)
ax.set_ylabel('CO2 Total [g]')
pyplot.xlabel('Distancia Total')
#pyplot.savefig('per00per10_co2.pgf')
tikz_save('per00per10_co2.tex',
figureheight='\\figureheight',
figurewidth='\\figurewidth')
pyplot.show()
def per00_vs_per10_speedhist():
per00 = | pandas.read_csv('per0.0_total_stats.csv') | pandas.read_csv |
# -*- coding: utf-8 -*-
"""Timeseries Module
This module mirrors the Timeseries API. It allows you to fetch data from
the api and output it in various formats.
https://doc.cognitedata.com/0.5/#Cognite-API-Time-series
"""
import io
import os
import time
import math
from functools import partial
from multiprocessing import Pool
from typing import List
from urllib.parse import quote
import numpy as np
from requests_futures.sessions import FuturesSession
from concurrent.futures import ThreadPoolExecutor
import cognite._constants as _constants
import cognite._utils as _utils
import cognite.config as config
import pandas as pd
from cognite._protobuf_descriptors import _api_timeseries_data_v2_pb2
from cognite.v05.dto import (
Datapoint,
DatapointsResponse,
DatapointsResponseIterator,
LatestDatapointResponse,
TimeSeries,
TimeSeriesResponse,
TimeseriesWithDatapoints,
)
def get_datapoints(name, aggregates=None, granularity=None, start=None, end=None, **kwargs):
"""Returns a DatapointsObject containing a list of datapoints for the given query.
This method will automate paging for the user and return all data for the given time period.
Args:
name (str): The name of the timeseries to retrieve data for.
aggregates (list): The list of aggregate functions you wish to apply to the data. Valid aggregate functions
are: 'average/avg, max, min, count, sum, interpolation/int, stepinterpolation/step'.
granularity (str): The granularity of the aggregate values. Valid entries are : 'day/d, hour/h, minute/m,
second/s', or a multiple of these indicated by a number as a prefix e.g. '12hour'.
start (Union[str, int, datetime]): Get datapoints after this time. Format is N[timeunit]-ago where timeunit is w,d,h,m,s.
E.g. '2d-ago' will get everything that is up to 2 days old. Can also send time in ms since
epoch or a datetime object which will be converted to ms since epoch UTC.
end (Union[str, int, datetime]): Get datapoints up to this time. Same format as for start.
Keyword Arguments:
protobuf (bool): Download the data using the binary protobuf format. Only applicable when getting raw data.
Defaults to True.
        processes (int): Number of download processes to run in parallel. Defaults to the number returned by cpu_count().
api_key (str): Your api-key.
project (str): Project name.
limit (str): Max number of datapoints to return. If limit is specified, this method will not automate
paging and will return a maximum of 100,000 dps.
Returns:
v05.dto.DatapointsResponse: A data object containing the requested data with several getter methods with different
output formats.
"""
api_key, project = config.get_config_variables(kwargs.get("api_key"), kwargs.get("project"))
start, end = _utils.interval_to_ms(start, end)
if kwargs.get("limit"):
return _get_datapoints_user_defined_limit(
name,
aggregates,
granularity,
start,
end,
limit=kwargs.get("limit"),
protobuf=kwargs.get("protobuf"),
api_key=api_key,
project=project,
)
diff = end - start
num_of_processes = kwargs.get("processes", os.cpu_count())
granularity_ms = 1
if granularity:
granularity_ms = _utils.granularity_to_ms(granularity)
# Ensure that number of steps is not greater than the number data points that will be returned
steps = min(num_of_processes, max(1, int(diff / granularity_ms)))
# Make step size a multiple of the granularity requested in order to ensure evenly spaced results
step_size = _utils.round_to_nearest(int(diff / steps), base=granularity_ms)
# Create list of where each of the parallelized intervals will begin
step_starts = [start + (i * step_size) for i in range(steps)]
args = [{"start": start, "end": start + step_size} for start in step_starts]
partial_get_dps = partial(
_get_datapoints_helper_wrapper,
name=name,
aggregates=aggregates,
granularity=granularity,
protobuf=kwargs.get("protobuf", True),
api_key=api_key,
project=project,
)
if steps == 1:
dps = _get_datapoints_helper(
name,
aggregates,
granularity,
start,
end,
protobuf=kwargs.get("protobuf", True),
api_key=api_key,
project=project,
)
return DatapointsResponse({"data": {"items": [{"name": name, "datapoints": dps}]}})
with Pool(steps) as p:
datapoints = p.map(partial_get_dps, args)
concat_dps = []
[concat_dps.extend(el) for el in datapoints]
return DatapointsResponse({"data": {"items": [{"name": name, "datapoints": concat_dps}]}})
def _get_datapoints_helper_wrapper(args, name, aggregates, granularity, protobuf, api_key, project):
return _get_datapoints_helper(
name, aggregates, granularity, args["start"], args["end"], protobuf=protobuf, api_key=api_key, project=project
)
def _get_datapoints_helper(name, aggregates=None, granularity=None, start=None, end=None, **kwargs):
"""Returns a list of datapoints for the given query.
This method will automate paging for the given time period.
Args:
name (str): The name of the timeseries to retrieve data for.
aggregates (list): The list of aggregate functions you wish to apply to the data. Valid aggregate functions
are: 'average/avg, max, min, count, sum, interpolation/int, stepinterpolation/step'.
granularity (str): The granularity of the aggregate values. Valid entries are : 'day/d, hour/h, minute/m,
second/s', or a multiple of these indicated by a number as a prefix e.g. '12hour'.
start (Union[str, int, datetime]): Get datapoints after this time. Format is N[timeunit]-ago where timeunit is w,d,h,m,s.
E.g. '2d-ago' will get everything that is up to 2 days old. Can also send time in ms since
epoch or a datetime object which will be converted to ms since epoch UTC.
end (Union[str, int, datetime]): Get datapoints up to this time. Same format as for start.
Keyword Arguments:
protobuf (bool): Download the data using the binary protobuf format. Only applicable when getting raw data.
Defaults to True.
api_key (str): Your api-key. Obligatory in this helper method.
project (str): Project name. Obligatory in this helper method.
Returns:
list of datapoints: A list containing datapoint dicts.
"""
api_key, project = kwargs.get("api_key"), kwargs.get("project")
url = config.get_base_url(api_version=0.5) + "/projects/{}/timeseries/data/{}".format(project, quote(name, safe=""))
use_protobuf = kwargs.get("protobuf", True) and aggregates is None
limit = _constants.LIMIT if aggregates is None else _constants.LIMIT_AGG
params = {"aggregates": aggregates, "granularity": granularity, "limit": limit, "start": start, "end": end}
headers = {"api-key": api_key, "accept": "application/protobuf" if use_protobuf else "application/json"}
datapoints = []
while (not datapoints or len(datapoints[-1]) == limit) and params["end"] > params["start"]:
res = _utils.get_request(url, params=params, headers=headers)
if use_protobuf:
ts_data = _api_timeseries_data_v2_pb2.TimeseriesData()
ts_data.ParseFromString(res.content)
res = [{"timestamp": p.timestamp, "value": p.value} for p in ts_data.numericData.points]
else:
res = res.json()["data"]["items"][0]["datapoints"]
if not res:
break
datapoints.append(res)
latest_timestamp = int(datapoints[-1][-1]["timestamp"])
params["start"] = latest_timestamp + (_utils.granularity_to_ms(granularity) if granularity else 1)
dps = []
[dps.extend(el) for el in datapoints]
return dps
def _get_datapoints_user_defined_limit(name, aggregates, granularity, start, end, limit, **kwargs):
"""Returns a DatapointsResponse object with the requested data.
No paging or parallelizing is done.
Args:
name (str): The name of the timeseries to retrieve data for.
aggregates (list): The list of aggregate functions you wish to apply to the data. Valid aggregate functions
are: 'average/avg, max, min, count, sum, interpolation/int, stepinterpolation/step'.
granularity (str): The granularity of the aggregate values. Valid entries are : 'day/d, hour/h, minute/m,
second/s', or a multiple of these indicated by a number as a prefix e.g. '12hour'.
start (Union[str, int, datetime]): Get datapoints after this time. Format is N[timeunit]-ago where timeunit is w,d,h,m,s.
E.g. '2d-ago' will get everything that is up to 2 days old. Can also send time in ms since
epoch or a datetime object which will be converted to ms since epoch UTC.
end (Union[str, int, datetime]): Get datapoints up to this time. Same format as for start.
limit (str): Max number of datapoints to return. Max is 100,000.
Keyword Arguments:
protobuf (bool): Download the data using the binary protobuf format. Only applicable when getting raw data.
Defaults to True.
api_key (str): Your api-key. Obligatory in this helper method.
project (str): Project name. Obligatory in this helper method.
Returns:
v05.dto.DatapointsResponse: A data object containing the requested data with several getter methods with different
output formats.
"""
api_key, project = kwargs.get("api_key"), kwargs.get("project")
url = config.get_base_url(api_version=0.5) + "/projects/{}/timeseries/data/{}".format(project, quote(name, safe=""))
use_protobuf = kwargs.get("protobuf", True) and aggregates is None
params = {"aggregates": aggregates, "granularity": granularity, "limit": limit, "start": start, "end": end}
headers = {"api-key": api_key, "accept": "application/protobuf" if use_protobuf else "application/json"}
res = _utils.get_request(url, params=params, headers=headers)
if use_protobuf:
ts_data = _api_timeseries_data_v2_pb2.TimeseriesData()
ts_data.ParseFromString(res.content)
res = [{"timestamp": p.timestamp, "value": p.value} for p in ts_data.numericData.points]
else:
res = res.json()["data"]["items"][0]["datapoints"]
return DatapointsResponse({"data": {"items": [{"name": name, "datapoints": res}]}})
def _split_TimeseriesWithDatapoints_if_over_limit(
timeseries_with_datapoints: TimeseriesWithDatapoints, limit: int
) -> List[TimeseriesWithDatapoints]:
"""Takes a TimeseriesWithDatapoints and splits it into multiple so that each has a max number of datapoints equal
to the limit given.
Args:
timeseries_with_datapoints (v05.dto.TimeseriesWithDatapoints): The timeseries with data to potentially split up.
Returns:
A list of v05.dto.TimeSeriesWithDatapoints where each has a maximum number of datapoints equal to the limit given.
"""
timeseries_with_datapoints_list = []
if len(timeseries_with_datapoints.datapoints) > limit:
i = 0
while i < len(timeseries_with_datapoints.datapoints):
timeseries_with_datapoints_list.append(
TimeseriesWithDatapoints(
name=timeseries_with_datapoints.name,
datapoints=timeseries_with_datapoints.datapoints[i : i + limit],
)
)
i += limit
else:
timeseries_with_datapoints_list.append(timeseries_with_datapoints)
return timeseries_with_datapoints_list
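# Worked example (illustrative): a TimeseriesWithDatapoints holding 250,000 datapoints
# split with limit=100,000 yields three objects with 100,000 / 100,000 / 50,000
# datapoints, all sharing the same name.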
def post_multi_tag_datapoints(timeseries_with_datapoints: List[TimeseriesWithDatapoints], **kwargs):
"""Insert data into multiple timeseries.
Args:
timeseries_with_datapoints (List[v05.dto.TimeseriesWithDatapoints]): The timeseries with data to insert.
Keyword Args:
api_key (str): Your api-key.
project (str): Project name.
use_gzip (bool): Whether or not to gzip the request
Returns:
        The json response of the last request (an empty response body on success).
"""
api_key, project = config.get_config_variables(kwargs.get("api_key"), kwargs.get("project"))
url = config.get_base_url(api_version=0.4) + "/projects/{}/timeseries/data".format(project)
use_gzip = kwargs.get("use_gzip", False)
headers = {"api-key": api_key, "content-type": "application/json", "accept": "application/json"}
ul_dps_limit = 100000
    # Make sure we only work with TimeseriesWithDatapoints objects that have at most ul_dps_limit datapoints each
timeseries_with_datapoints_limited = []
for entry in timeseries_with_datapoints:
timeseries_with_datapoints_limited.extend(_split_TimeseriesWithDatapoints_if_over_limit(entry, ul_dps_limit))
# Group these TimeseriesWithDatapoints if possible so that we upload as much as possible in each call to the API
timeseries_to_upload_binned = _utils.first_fit(
list_items=timeseries_with_datapoints_limited, max_size=ul_dps_limit, get_count=lambda x: len(x.datapoints)
)
for bin in timeseries_to_upload_binned:
body = {
"items": [
{"tagId": ts_with_data.name, "datapoints": [dp.__dict__ for dp in ts_with_data.datapoints]}
for ts_with_data in bin
]
}
res = _utils.post_request(url, body=body, headers=headers, use_gzip=use_gzip)
return res.json()
def get_num_chunks(num_datapoints, max_post_size, num_workers):
nd = num_datapoints
return math.ceil(min(max(num_workers, nd/max_post_size), nd))
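# Worked example (illustrative): with num_datapoints=250_000, max_post_size=100_000 and
# num_workers=10, nd/max_post_size is 2.5, max(10, 2.5) is 10, so 10 chunks are used;
# with only 3 datapoints the min() caps the result at 3 chunks.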
def post_datapoints(name, datapoints: List[Datapoint], **kwargs):
"""Insert a list of datapoints.
Args:
name (str): Name of timeseries to insert to.
datapoints (list[v05.dto.Datapoint): List of datapoint data transfer objects to insert.
Keyword Args:
api_key (str): Your api-key.
project (str): Project name.
Returns:
        A list of responses, one per uploaded chunk (failed chunks contain the error).
"""
num_workers = config.get_number_of_parallel_workers(kwargs.get("workers"))
api_key, project = config.get_config_variables(kwargs.get("api_key"), kwargs.get("project"))
url = config.get_base_url(api_version=0.5) + \
"/projects/{}/timeseries/data/{}".format(project, quote(name, safe=""))
headers = {"api-key": api_key, "content-type": "application/json", "accept": "application/json"}
session = FuturesSession(executor=ThreadPoolExecutor(max_workers=num_workers))
def do_post(chunk):
body = {"items": [dp.__dict__ for dp in chunk]}
return _utils.post_request(url, body=body, headers=headers, session=session)
ul_dps_limit = 100000
num_chunks = get_num_chunks(len(datapoints), ul_dps_limit, num_workers)
chunk_indices = np.linspace(0, len(datapoints), num_chunks + 1, dtype=int)
chunks = [datapoints[s:e] for s, e in zip(chunk_indices[:-1], chunk_indices[1:])]
responses = [None for _ in chunks]
failures = [None for _ in chunks]
for retry in range(config.get_number_of_retries() + 1):
futures = [do_post(chunk) for chunk, res in zip(chunks, responses) if not res]
fut_idx = 0
for res_idx in range(len(responses)):
if responses[res_idx]:
continue
try:
response = futures[fut_idx].result()
if response.status_code == 200:
responses[res_idx] = response
else:
failures[res_idx] = response
print(f'Failure on retry {retry}: response #{res_idx} had status '
f'code {response.status_code}.')
except Exception as e:
failures[res_idx] = e
print(f'Failure on retry {retry}: response #{res_idx} threw exception with '
f'message {e}.')
fut_idx += 1
for res_idx, fail in enumerate(failures):
if responses[res_idx] is None:
responses[res_idx] = fail
return responses
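# Illustrative usage sketch (timestamps and values are made up; the Datapoint constructor
# arguments are assumed from the dto import above): insert two raw datapoints, with
# timestamps in ms since epoch.
#
#     dps = [Datapoint(timestamp=1514764800000, value=1.0),
#            Datapoint(timestamp=1514764860000, value=2.0)]
#     responses = post_datapoints("my/timeseries", dps)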
def get_latest(name, **kwargs):
"""Returns a LatestDatapointObject containing the latest datapoint for the given timeseries.
Args:
name (str): The name of the timeseries to retrieve data for.
Keyword Arguments:
api_key (str): Your api-key.
project (str): Project name.
Returns:
v05.dto.LatestDatapointsResponse: A data object containing the requested data with several getter methods with different
output formats.
"""
api_key, project = config.get_config_variables(kwargs.get("api_key"), kwargs.get("project"))
url = config.get_base_url(api_version=0.5) + "/projects/{}/timeseries/latest/{}".format(
project, quote(name, safe="")
)
headers = {"api-key": api_key, "accept": "application/json"}
res = _utils.get_request(url, headers=headers, cookies=config.get_cookies())
return LatestDatapointResponse(res.json())
def get_multi_time_series_datapoints(
datapoints_queries, aggregates=None, granularity=None, start=None, end=None, **kwargs
):
"""Returns a list of DatapointsObjects each of which contains a list of datapoints for the given timeseries.
This method will automate paging for the user and return all data for the given time period(s).
Args:
datapoints_queries (list[v05.dto.DatapointsQuery]): The list of DatapointsQuery objects specifying which
timeseries to retrieve data for.
aggregates (list, optional): The list of aggregate functions you wish to apply to the data. Valid aggregate
functions are: 'average/avg, max, min, count, sum, interpolation/int,
stepinterpolation/step'.
granularity (str): The granularity of the aggregate values. Valid entries are : 'day/d, hour/h,
minute/m, second/s', or a multiple of these indicated by a number as a prefix
e.g. '12hour'.
start (Union[str, int, datetime]): Get datapoints after this time. Format is N[timeunit]-ago where timeunit is w,d,h,m,s.
E.g. '2d-ago' will get everything that is up to 2 days old. Can also send time in ms since
epoch or a datetime object which will be converted to ms since epoch UTC.
end (Union[str, int, datetime]): Get datapoints up to this time. Same format as for start.
Keyword Arguments:
api_key (str): Your api-key.
project (str): Project name.
Returns:
list(v05.dto.DatapointsResponse): A list of data objects containing the requested data with several getter methods
with different output formats.
"""
api_key, project = config.get_config_variables(kwargs.get("api_key"), kwargs.get("project"))
url = config.get_base_url(api_version=0.5) + "/projects/{}/timeseries/dataquery".format(project)
start, end = _utils.interval_to_ms(start, end)
num_of_dpqs_with_agg = 0
num_of_dpqs_raw = 0
for dpq in datapoints_queries:
if (dpq.aggregates is None and aggregates is None) or dpq.aggregates == "":
num_of_dpqs_raw += 1
else:
num_of_dpqs_with_agg += 1
items = []
for dpq in datapoints_queries:
if dpq.aggregates is None and aggregates is None:
dpq.limit = int(_constants.LIMIT / num_of_dpqs_raw)
else:
dpq.limit = int(_constants.LIMIT_AGG / num_of_dpqs_with_agg)
items.append(dpq.__dict__)
body = {
"items": items,
"aggregates": ",".join(aggregates) if aggregates is not None else None,
"granularity": granularity,
"start": start,
"end": end,
}
headers = {"api-key": api_key, "content-type": "application/json", "accept": "application/json"}
datapoints_responses = []
has_incomplete_requests = True
while has_incomplete_requests:
res = _utils.post_request(url=url, body=body, headers=headers, cookies=config.get_cookies()).json()["data"][
"items"
]
datapoints_responses.append(res)
has_incomplete_requests = False
for i, dpr in enumerate(res):
dpq = datapoints_queries[i]
if len(dpr["datapoints"]) == dpq.limit:
has_incomplete_requests = True
latest_timestamp = dpr["datapoints"][-1]["timestamp"]
ts_granularity = granularity if dpq.granularity is None else dpq.granularity
next_start = latest_timestamp + (_utils.granularity_to_ms(ts_granularity) if ts_granularity else 1)
else:
next_start = end - 1
if datapoints_queries[i].end:
next_start = datapoints_queries[i].end - 1
datapoints_queries[i].start = next_start
results = [{"data": {"items": [{"name": dpq.name, "datapoints": []}]}} for dpq in datapoints_queries]
for res in datapoints_responses:
for i, ts in enumerate(res):
results[i]["data"]["items"][0]["datapoints"].extend(ts["datapoints"])
return DatapointsResponseIterator([DatapointsResponse(result) for result in results])
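# Illustrative sketch (DatapointsQuery is assumed to live in cognite.v05.dto; it is not
# imported in this module and its constructor signature is an assumption): query two
# timeseries at once with shared aggregates.
#
#     queries = [DatapointsQuery(name="ts-one"), DatapointsQuery(name="ts-two")]
#     for res in get_multi_time_series_datapoints(queries, aggregates=["avg"],
#                                                 granularity="1h", start="1d-ago"):
#         print(res)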
def get_datapoints_frame(time_series, aggregates, granularity, start=None, end=None, **kwargs):
"""Returns a pandas dataframe of datapoints for the given timeseries all on the same timestamps.
This method will automate paging for the user and return all data for the given time period.
Args:
time_series (list): The list of timeseries names to retrieve data for. Each timeseries can be either a string
                            containing the timeseries name or a dictionary containing the timeseries name and a
list of specific aggregate functions.
aggregates (list): The list of aggregate functions you wish to apply to the data for which you have not
specified an aggregate function. Valid aggregate functions are: 'average/avg, max, min,
count, sum, interpolation/int, stepinterpolation/step'.
granularity (str): The granularity of the aggregate values. Valid entries are : 'day/d, hour/h, minute/m,
second/s', or a multiple of these indicated by a number as a prefix e.g. '12hour'.
start (Union[str, int, datetime]): Get datapoints after this time. Format is N[timeunit]-ago where timeunit is w,d,h,m,s.
E.g. '2d-ago' will get everything that is up to 2 days old. Can also send time in ms since
epoch or a datetime object which will be converted to ms since epoch UTC.
end (Union[str, int, datetime]): Get datapoints up to this time. Same format as for start.
Keyword Arguments:
api_key (str): Your api-key.
project (str): Project name.
cookies (dict): Cookies.
limit (str): Max number of rows to return. If limit is specified, this method will not automate
paging and will return a maximum of 100,000 rows.
        processes (int): Number of download processes to run in parallel. Defaults to the number returned by cpu_count().
Returns:
pandas.DataFrame: A pandas dataframe containing the datapoints for the given timeseries. The datapoints for all the
timeseries will all be on the same timestamps.
Note:
The ``timeseries`` parameter can take a list of strings and/or dicts on the following formats::
Using strings:
['<timeseries1>', '<timeseries2>']
Using dicts:
[{'name': '<timeseries1>', 'aggregates': ['<aggfunc1>', '<aggfunc2>']},
{'name': '<timeseries2>', 'aggregates': []}]
Using both:
['<timeseries1>', {'name': '<timeseries2>', 'aggregates': ['<aggfunc1>', '<aggfunc2>']}]
"""
if not isinstance(time_series, list):
raise _utils.InputError("time_series should be a list")
api_key, project = config.get_config_variables(kwargs.get("api_key"), kwargs.get("project"))
cookies = config.get_cookies(kwargs.get("cookies"))
start, end = _utils.interval_to_ms(start, end)
if kwargs.get("limit"):
return _get_datapoints_frame_user_defined_limit(
time_series,
aggregates,
granularity,
start,
end,
limit=kwargs.get("limit"),
api_key=api_key,
project=project,
cookies=cookies,
)
diff = end - start
num_of_processes = kwargs.get("processes") or os.cpu_count()
granularity_ms = 1
if granularity:
granularity_ms = _utils.granularity_to_ms(granularity)
    # Ensure that the number of steps is not greater than the number of data points that will be returned
steps = min(num_of_processes, max(1, int(diff / granularity_ms)))
# Make step size a multiple of the granularity requested in order to ensure evenly spaced results
step_size = _utils.round_to_nearest(int(diff / steps), base=granularity_ms)
# Create list of where each of the parallelized intervals will begin
step_starts = [start + (i * step_size) for i in range(steps)]
args = [{"start": start, "end": start + step_size} for start in step_starts]
partial_get_dpsf = partial(
_get_datapoints_frame_helper_wrapper,
time_series=time_series,
aggregates=aggregates,
granularity=granularity,
api_key=api_key,
project=project,
cookies=cookies,
)
if steps == 1:
return _get_datapoints_frame_helper(
time_series, aggregates, granularity, start, end, api_key=api_key, project=project, cookies=cookies
)
with Pool(steps) as p:
dataframes = p.map(partial_get_dpsf, args)
df = pd.concat(dataframes).drop_duplicates(subset="timestamp").reset_index(drop=True)
return df
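# Illustrative usage sketch (timeseries names are placeholders): request hourly averages
# for two timeseries over the last 30 days as one timestamp-aligned dataframe, overriding
# the aggregates for the second series.
#
#     df = get_datapoints_frame(
#         ["ts-one", {"name": "ts-two", "aggregates": ["min", "max"]}],
#         aggregates=["avg"], granularity="1h", start="30d-ago")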
def _get_datapoints_frame_helper_wrapper(args, time_series, aggregates, granularity, api_key, project, cookies):
return _get_datapoints_frame_helper(
time_series,
aggregates,
granularity,
args["start"],
args["end"],
api_key=api_key,
project=project,
cookies=cookies,
)
def _get_datapoints_frame_helper(time_series, aggregates, granularity, start=None, end=None, **kwargs):
"""Returns a pandas dataframe of datapoints for the given timeseries all on the same timestamps.
This method will automate paging for the user and return all data for the given time period.
Args:
time_series (list): The list of timeseries names to retrieve data for. Each timeseries can be either a string containing the
ts name or a dictionary containing the ts name and a list of specific aggregate functions.
aggregates (list): The list of aggregate functions you wish to apply to the data for which you have not
specified an aggregate function. Valid aggregate functions are: 'average/avg, max, min,
count, sum, interpolation/int, stepinterpolation/step'.
granularity (str): The granularity of the aggregate values. Valid entries are : 'day/d, hour/h, minute/m,
second/s', or a multiple of these indicated by a number as a prefix e.g. '12hour'.
start (Union[str, int, datetime]): Get datapoints after this time. Format is N[timeunit]-ago where timeunit is w,d,h,m,s.
E.g. '2d-ago' will get everything that is up to 2 days old. Can also send time in ms since
epoch or a datetime object which will be converted to ms since epoch UTC.
end (Union[str, int, datetime]): Get datapoints up to this time. Same format as for start.
Keyword Arguments:
api_key (str): Your api-key.
project (str): Project name.
Returns:
pandas.DataFrame: A pandas dataframe containing the datapoints for the given timeseries. The datapoints for all the
timeseries will all be on the same timestamps.
Note:
The ``timeseries`` parameter can take a list of strings and/or dicts on the following formats::
Using strings:
['<timeseries1>', '<timeseries2>']
Using dicts:
[{'name': '<timeseries1>', 'aggregates': ['<aggfunc1>', '<aggfunc2>']},
{'name': '<timeseries2>', 'aggregates': []}]
Using both:
['<timeseries1>', {'name': '<timeseries2>', 'aggregates': ['<aggfunc1>', '<aggfunc2>']}]
"""
api_key, project = kwargs.get("api_key"), kwargs.get("project")
cookies = kwargs.get("cookies")
url = config.get_base_url(api_version=0.5) + "/projects/{}/timeseries/dataframe".format(project)
num_aggregates = 0
for ts in time_series:
if isinstance(ts, str) or ts.get("aggregates") is None:
num_aggregates += len(aggregates)
else:
num_aggregates += len(ts["aggregates"])
per_tag_limit = int(_constants.LIMIT / num_aggregates)
body = {
"items": [
{"name": "{}".format(ts)}
if isinstance(ts, str)
else {"name": "{}".format(ts["name"]), "aggregates": ts.get("aggregates", [])}
for ts in time_series
],
"aggregates": aggregates,
"granularity": granularity,
"start": start,
"end": end,
"limit": per_tag_limit,
}
headers = {"api-key": api_key, "content-type": "application/json", "accept": "text/csv"}
dataframes = []
while (not dataframes or dataframes[-1].shape[0] == per_tag_limit) and body["end"] > body["start"]:
res = _utils.post_request(url=url, body=body, headers=headers, cookies=cookies)
dataframes.append(
pd.read_csv(io.StringIO(res.content.decode(res.encoding if res.encoding else res.apparent_encoding)))
)
if dataframes[-1].empty:
break
latest_timestamp = int(dataframes[-1].iloc[-1, 0])
body["start"] = latest_timestamp + _utils.granularity_to_ms(granularity)
return | pd.concat(dataframes) | pandas.concat |
# -*- coding: utf-8 -*-
#https://github.com/gboeing/osmnx-examples/tree/master/notebooks
#import database.mongo_loginManager as mdb
#import database.models.odm_distribution_mongo as model_dist
#import database.models.odm_production_mongo as model_prod
from logproj.ml_dataCleaning import cleanUsingIQR
import osmnx as ox
import numpy as np
import pandas as pd
def import_graph_drive(D_node,latCol,lonCol,D_plant, plantLatitude, plantLongitude,cleanOutliers=False):
'''
the function imports a road network using osmnx library
D_node is the table containing the nodes of the network
latCol is the name attribute of the latitude of the node collection
lonCol is the name attribute of the longitude of the node collection
    D_plant is the table containing the plants of the network
    plantLatitude is the name attribute of the latitude of the plant collection
    plantLongitude is the name attribute of the longitude of the plant collection
    cleanOutliers is True to remove outliers of latitude and longitude by using IQR
    returns the road network graph and the coverage information
'''
coverages=(1,np.nan)
#mdb.setConnection(dbName)
#D_plant=mdb.queryTodf(model_prod.plant.objects)
#D_node=mdb.queryTodf(model_dist.node.objects)
#remove latitude and longitude outliers
if cleanOutliers:
        D_node, coverages = cleanUsingIQR(D_node, [latCol, lonCol])
allLatitudes=list(D_node[latCol]) + list(D_plant[plantLatitude])
allLongitudes=list(D_node[lonCol]) + list(D_plant[plantLongitude])
min_lat = min(allLatitudes)
max_lat = max(allLatitudes)
min_lon = min(allLongitudes)
max_Lon = max(allLongitudes)
G = ox.graph_from_bbox(max_lat, min_lat,max_Lon,min_lon, network_type='drive')
output_coverages = | pd.DataFrame(coverages) | pandas.DataFrame |
import glob
import os
import sys
# these imports and sys.path insertions need to stay in this order
sys.path.insert(0, "../")
sys.path.insert(0, "TP_model")
sys.path.insert(0, "TP_model/fit_and_forecast")
from Reff_functions import *
from Reff_constants import *
from sys import argv
from datetime import timedelta, datetime
from scipy.special import expit
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use("Agg")
def forecast_TP(data_date):
from scenarios import scenarios, scenario_dates
from params import (
num_forecast_days,
alpha_start_date,
delta_start_date,
omicron_start_date,
truncation_days,
start_date,
sim_start_date,
third_start_date,
mob_samples,
)
data_date = pd.to_datetime(data_date)
# Define inputs
sim_start_date = pd.to_datetime(sim_start_date)
# Add 3 days buffer to mobility forecast
num_forecast_days = num_forecast_days + 3
# data_date = pd.to_datetime('2022-01-25')
print("============")
print("Generating forecasts using data from", data_date)
print("============")
# convert third start date to the correct format
third_start_date = pd.to_datetime(third_start_date)
third_end_date = data_date - timedelta(truncation_days)
# a different end date to deal with issues in fitting
third_end_date_diff = data_date - timedelta(18 + 7 + 7)
third_states = sorted(["NSW", "VIC", "ACT", "QLD", "SA", "TAS", "NT", "WA"])
# third_states = sorted(['NSW', 'VIC', 'ACT', 'QLD', 'SA', 'NT'])
# choose dates for each state for third wave
# NOTE: These need to be in date sorted order
third_date_range = {
"ACT": pd.date_range(start="2021-08-15", end=third_end_date).values,
"NSW": pd.date_range(start="2021-06-25", end=third_end_date).values,
"NT": pd.date_range(start="2021-12-20", end=third_end_date).values,
"QLD": pd.date_range(start="2021-07-30", end=third_end_date).values,
"SA": pd.date_range(start="2021-12-10", end=third_end_date).values,
"TAS": pd.date_range(start="2021-12-20", end=third_end_date).values,
"VIC": pd.date_range(start="2021-07-10", end=third_end_date).values,
"WA": pd.date_range(start="2022-01-01", end=third_end_date).values,
}
# Get Google Data - Don't use the smoothed data?
df_google_all = read_in_google(Aus_only=True, moving=True, local=True)
third_end_date = pd.to_datetime(data_date) - pd.Timedelta(days=truncation_days)
results_dir = (
"results/"
+ data_date.strftime("%Y-%m-%d")
+ "/"
)
# Load in vaccination data by state and date which should have the same date as the
# NNDSS/linelist data use the inferred VE
vaccination_by_state_delta = pd.read_csv(
results_dir + "adjusted_vaccine_ts_delta" + data_date.strftime("%Y-%m-%d") + ".csv",
parse_dates=["date"],
)
vaccination_by_state_delta = vaccination_by_state_delta[["state", "date", "effect"]]
vaccination_by_state_delta = vaccination_by_state_delta.pivot(
index="state", columns="date", values="effect"
) # Convert to matrix form
# Convert to simple array for indexing
vaccination_by_state_delta_array = vaccination_by_state_delta.to_numpy()
vaccination_by_state_omicron = pd.read_csv(
results_dir + "adjusted_vaccine_ts_omicron" + data_date.strftime("%Y-%m-%d") + ".csv",
parse_dates=["date"],
)
vaccination_by_state_omicron = vaccination_by_state_omicron[["state", "date", "effect"]]
vaccination_by_state_omicron = vaccination_by_state_omicron.pivot(
index="state", columns="date", values="effect"
) # Convert to matrix form
# Convert to simple array for indexing
vaccination_by_state_omicron_array = vaccination_by_state_omicron.to_numpy()
# Get survey data
surveys = pd.DataFrame()
path = "data/md/Barometer wave*.csv"
for file in glob.glob(path):
surveys = surveys.append(pd.read_csv(file, parse_dates=["date"]))
surveys = surveys.sort_values(by="date")
print("Latest microdistancing survey is {}".format(surveys.date.values[-1]))
surveys.loc[surveys.state != "ACT", "state"] = (
surveys.loc[surveys.state != "ACT", "state"]
.map(states_initials)
.fillna(surveys.loc[surveys.state != "ACT", "state"])
)
surveys["proportion"] = surveys["count"] / surveys.respondents
surveys.date = pd.to_datetime(surveys.date)
always = surveys.loc[surveys.response == "Always"].set_index(["state", "date"])
always = always.unstack(["state"])
# fill in date range
idx = pd.date_range("2020-03-01", pd.to_datetime("today"))
always = always.reindex(idx, fill_value=np.nan)
always.index.name = "date"
always = always.fillna(method="bfill")
always = always.stack(["state"])
# Zero out before first survey 20th March
always = always.reset_index().set_index("date")
always.loc[:"2020-03-20", "count"] = 0
always.loc[:"2020-03-20", "respondents"] = 0
always.loc[:"2020-03-20", "proportion"] = 0
always = always.reset_index().set_index(["state", "date"])
survey_X = pd.pivot_table(
data=always, index="date", columns="state", values="proportion"
)
prop_all = survey_X
## read in and process mask wearing data
mask_wearing = pd.DataFrame()
path = "data/face_coverings/face_covering_*_.csv"
for file in glob.glob(path):
mask_wearing = mask_wearing.append(pd.read_csv(file, parse_dates=["date"]))
mask_wearing = mask_wearing.sort_values(by="date")
print("Latest mask wearing survey is {}".format(mask_wearing.date.values[-1]))
# mask_wearing['state'] = mask_wearing['state'].map(states_initials).fillna(mask_wearing['state'])
mask_wearing.loc[mask_wearing.state != "ACT", "state"] = (
mask_wearing.loc[mask_wearing.state != "ACT", "state"]
.map(states_initials)
.fillna(mask_wearing.loc[mask_wearing.state != "ACT", "state"])
)
mask_wearing["proportion"] = mask_wearing["count"] / mask_wearing.respondents
mask_wearing.date = pd.to_datetime(mask_wearing.date)
mask_wearing_always = mask_wearing.loc[
mask_wearing.face_covering == "Always"
].set_index(["state", "date"])
mask_wearing_always = mask_wearing_always.unstack(["state"])
idx = pd.date_range("2020-03-01", pd.to_datetime("today"))
mask_wearing_always = mask_wearing_always.reindex(idx, fill_value=np.nan)
mask_wearing_always.index.name = "date"
# fill back to earlier and between weeks.
# Assume survey on day x applies for all days up to x - 6
mask_wearing_always = mask_wearing_always.fillna(method="bfill")
mask_wearing_always = mask_wearing_always.stack(["state"])
# Zero out before first survey 20th March
mask_wearing_always = mask_wearing_always.reset_index().set_index("date")
mask_wearing_always.loc[:"2020-03-20", "count"] = 0
mask_wearing_always.loc[:"2020-03-20", "respondents"] = 0
mask_wearing_always.loc[:"2020-03-20", "proportion"] = 0
mask_wearing_X = pd.pivot_table(
data=mask_wearing_always, index="date", columns="state", values="proportion"
)
mask_wearing_all = mask_wearing_X
# Get posterior
df_samples = read_in_posterior(
date=data_date.strftime("%Y-%m-%d"),
)
states = sorted(["NSW", "QLD", "SA", "VIC", "TAS", "WA", "ACT", "NT"])
plot_states = states.copy()
one_month = data_date + timedelta(days=num_forecast_days)
days_from_March = (one_month - pd.to_datetime(start_date)).days
# filter out future info
prop = prop_all.loc[:data_date]
masks = mask_wearing_all.loc[:data_date]
df_google = df_google_all.loc[df_google_all.date <= data_date]
# use this trick of saving the google data and then reloading it to kill
# the date time values
df_google.to_csv("results/test_google_data.csv")
df_google = pd.read_csv("results/test_google_data.csv")
# remove the temporary file
# os.remove("results/test_google_data.csv")
# Simple interpolation for missing values in Google data
df_google = df_google.interpolate(method="linear", axis=0)
df_google.date = pd.to_datetime(df_google.date)
# forecast time parameters
today = data_date.strftime("%Y-%m-%d")
# add days to forecast if we are missing data
if df_google.date.values[-1] < data_date:
n_forecast = num_forecast_days + (data_date - df_google.date.values[-1]).days
else:
n_forecast = num_forecast_days
training_start_date = datetime(2020, 3, 1, 0, 0)
print(
"Forecast ends at {} days after 1st March".format(
(pd.to_datetime(today) - pd.to_datetime(training_start_date)).days
+ num_forecast_days
)
)
print(
"Final date is {}".format(pd.to_datetime(today) + timedelta(days=num_forecast_days))
)
df_google = df_google.loc[df_google.date >= training_start_date]
outdata = {"date": [], "type": [], "state": [], "mean": [], "std": []}
predictors = mov_values.copy()
# predictors.remove("residential_7days")
# Setup Figures
axes = []
figs = []
for var in predictors:
fig, ax_states = plt.subplots(figsize=(7, 8), nrows=4, ncols=2, sharex=True)
axes.append(ax_states)
# fig.suptitle(var)
figs.append(fig)
# extra fig for microdistancing
var = "Proportion people always microdistancing"
fig, ax_states = plt.subplots(figsize=(7, 8), nrows=4, ncols=2, sharex=True)
axes.append(ax_states)
figs.append(fig)
# # extra fig for mask wearing
var = "Proportion people always wearing masks"
fig, ax_states = plt.subplots(figsize=(7, 8), nrows=4, ncols=2, sharex=True)
axes.append(ax_states)
figs.append(fig)
var = "Reduction in Reff due to vaccination"
fig, ax_states = plt.subplots(figsize=(7, 8), nrows=4, ncols=2, sharex=True)
axes.append(ax_states)
figs.append(fig)
var = "Reduction in Reff due to vaccination"
fig, ax_states = plt.subplots(figsize=(7, 8), nrows=4, ncols=2, sharex=True)
axes.append(ax_states)
figs.append(fig)
# Forecasting Params
n_training = 21 # Period to examine trend
n_baseline = 150 # Period to create baseline
n_training_vaccination = 30 # period to create trend for vaccination
# since this can be useful, predictor ordering is:
# [
# 'retail_and_recreation_7days',
# 'grocery_and_pharmacy_7days',
# 'parks_7days',
# 'transit_stations_7days',
# 'workplaces_7days'
# ]
# Loop through states and run forecasting.
print("============")
print("Forecasting macro, micro and vaccination")
print("============")
state_Rmed = {}
state_sims = {}
for i, state in enumerate(states):
rownum = int(i / 2)
colnum = np.mod(i, 2)
rows = df_google.loc[df_google.state == state].shape[0]
# Rmed currently a list, needs to be a matrix
Rmed_array = np.zeros(shape=(rows, len(predictors), mob_samples))
for j, var in enumerate(predictors):
for n in range(mob_samples):
# historically we want a little more noise. In the actual forecasting of trends
# we don't want this to be quite that prominent.
Rmed_array[:, j, n] = df_google[df_google["state"] == state][
var
].values.T + np.random.normal(
loc=0, scale=df_google[df_google["state"] == state][var + "_std"]
)
dates = df_google[df_google["state"] == state]["date"]
# cap min and max at historical or (-50,0)
# 1 by predictors by mob_samples size
minRmed_array = np.minimum(-50, np.amin(Rmed_array, axis=0))
maxRmed_array = np.maximum(10, np.amax(Rmed_array, axis=0))
# days by predictors by samples
sims = np.zeros(shape=(n_forecast, len(predictors), mob_samples))
for n in range(mob_samples): # Loop through simulations
Rmed = Rmed_array[:, :, n]
minRmed = minRmed_array[:, n]
maxRmed = maxRmed_array[:, n]
if maxRmed[1] < 20:
maxRmed[1] = 50
R_baseline_mean = np.mean(Rmed[-n_baseline:, :], axis=0)
if state not in {"WA"}:
R_baseline_mean[-1] = 0
R_diffs = np.diff(Rmed[-n_training:, :], axis=0)
mu = np.mean(R_diffs, axis=0)
cov = np.cov(R_diffs, rowvar=False) # columns are vars, rows are obs
# Forecast mobility forward sequentially by day.
# current = np.mean(Rmed[-9:-2, :], axis=0) # Start from last valid days
# current = np.mean(Rmed[-1, :], axis=0) # Start from last valid days
current = Rmed[-1, :] # Start from last valid days
for i in range(n_forecast):
# ## SCENARIO MODELLING
# This code chunk will allow you to manually set the distancing params for a state to allow for modelling.
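# An empty scenario string ("") gives the standard forecast (trend force plus regression to
# baseline); the non-empty options handled below are "no_reversion" / "school_opening_2022",
# "no_reversion_continuous_lockdown", "full_reversion", "immediately_baseline",
# "half_reversion" and "stage4".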
if scenarios[state] == "":
# Proportion of trend_force to regression_to_baseline_force
p_force = (n_forecast - i) / (n_forecast)
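# p_force decays linearly from ~1 at the start of the horizon to ~0 at the end, so early
# forecast days mostly follow the recent trend and later days revert towards the baseline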
# Generate a single forward realisation of trend
trend_force = np.random.multivariate_normal(mu, cov)
# Generate a single forward realisation of baseline regression
# regression to baseline force stronger in standard forecasting
regression_to_baseline_force = np.random.multivariate_normal(
0.05 * (R_baseline_mean - current), cov
)
new_forcast_points = (
current + p_force * trend_force + (1 - p_force) * regression_to_baseline_force
) # Find overall simulation step
# Apply minimum and maximum
new_forcast_points = np.maximum(minRmed, new_forcast_points)
new_forcast_points = np.minimum(maxRmed, new_forcast_points)
current = new_forcast_points
elif scenarios[state] != "":
# Make baseline cov for generating points
cov_baseline = np.cov(Rmed[-42:-28, :], rowvar=False)
mu_current = Rmed[-1, :]
mu_victoria = np.array(
[
-55.35057887,
-22.80891056,
-46.59531636,
-75.99942378,
-44.71119293,
]
)
mu_baseline = np.mean(Rmed[-42:-28, :], axis=0)
# mu_baseline = 0*np.mean(Rmed[-42:-28, :], axis=0)
if scenario_dates[state] != "":
scenario_change_point = (
pd.to_datetime(scenario_dates[state]) - data_date
).days + (n_forecast - 42)
# Constant Lockdown
if (
scenarios[state] == "no_reversion"
or scenarios[state] == "school_opening_2022"
):
# take a continuous median to account for noise in recent observations (such as sunny days)
# mu_current = np.mean(Rmed[-7:, :], axis=0)
# cov_baseline = np.cov(Rmed[-28:, :], rowvar=False)
new_forcast_points = np.random.multivariate_normal(
mu_current, cov_baseline
)
elif scenarios[state] == "no_reversion_continuous_lockdown":
# add the new scenario here
new_forcast_points = np.random.multivariate_normal(
mu_current, cov_baseline
)
# No Lockdown
elif scenarios[state] == "full_reversion":
# a full reversion scenario changes the social mobility and microdistancing
# behaviours at the scenario change date and then applies a return to baseline force
if i < scenario_change_point:
new_forcast_points = np.random.multivariate_normal(
mu_current, cov_baseline
)
else:
# baseline is within lockdown period so take a new baseline of 0's and trend towards this
R_baseline_0 = np.zeros_like(R_baseline_mean)
R_baseline_0 = mu_baseline
# set adjusted baselines by eyeline for now, need to get this automated
# R_baseline_0[1] = 10 # baseline of +10% for Grocery based on other jurisdictions
# # apply specific baselines to the jurisdictions progressing towards normal restrictions
# if state == 'NSW':
# R_baseline_0[3] = -25 # baseline of -25% for Transit based on 2021-April to 2021-July (pre-third-wave lockdowns)
# elif state == 'ACT':
# R_baseline_0[1] = 20 # baseline of +20% for Grocery based on other jurisdictions
# R_baseline_0[3] = -25 # baseline of -25% for Transit based on 2021-April to 2021-July (pre-third-wave lockdowns)
# elif state == 'VIC':
# R_baseline_0[0] = -15 # baseline of -15% for R&R based on 2021-April to 2021-July (pre-third-wave lockdowns)
# R_baseline_0[3] = -30 # baseline of -30% for Transit based on 2021-April to 2021-July (pre-third-wave lockdowns)
# R_baseline_0[4] = -15 # baseline of -15% for workplaces based on 2021-April to 2021-July (pre-third-wave lockdowns)
# the force we trend towards the baseline above with
p_force = (n_forecast - i) / (n_forecast)
trend_force = np.random.multivariate_normal(
mu, cov
) # Generate a single forward realisation of trend
# baseline scalar is smaller for this as we want slow returns
adjusted_baseline_drift_mean = R_baseline_0 - current
# we purposely scale the transit measure so that we increase a little more quickly
# tmp = 0.05 * adjusted_baseline_drift_mean[3]
adjusted_baseline_drift_mean *= 0.005
# adjusted_baseline_drift_mean[3] = tmp
regression_to_baseline_force = np.random.multivariate_normal(
adjusted_baseline_drift_mean, cov
) # Generate a single forward realisation of baseline regression
new_forcast_points = (
current
+ p_force * trend_force
+ (1 - p_force) * regression_to_baseline_force
) # Find overall simulation step
# new_forcast_points = current + regression_to_baseline_force # Find overall simulation step
# Apply minimum and maximum
new_forcast_points = np.maximum(minRmed, new_forcast_points)
new_forcast_points = np.minimum(maxRmed, new_forcast_points)
current = new_forcast_points
elif scenarios[state] == "immediately_baseline":
# this scenario is used to return instantly to the baseline levels
if i < scenario_change_point:
new_forcast_points = np.random.multivariate_normal(
mu_current, cov_baseline
)
else:
# baseline is within lockdown period so take a new baseline of 0's and trend towards this
R_baseline_0 = np.zeros_like(R_baseline_mean)
# jump immediately to baseline
new_forcast_points = np.random.multivariate_normal(
R_baseline_0, cov_baseline
)
# Temporary Lockdown
elif scenarios[state] == "half_reversion":
if i < scenario_change_point:
new_forcast_points = np.random.multivariate_normal(
mu_current, cov_baseline
)
else:
new_forcast_points = np.random.multivariate_normal(
(mu_current + mu_baseline) / 2, cov_baseline
)
# Stage 4
elif scenarios[state] == "stage4":
if i < scenario_change_point:
new_forcast_points = np.random.multivariate_normal(
mu_current, cov_baseline
)
else:
new_forcast_points = np.random.multivariate_normal(
mu_victoria, cov_baseline
)
# Set this day in this simulation to the forecast realisation
sims[i, :, n] = new_forcast_points
dd = [dates.tolist()[-1] + timedelta(days=x) for x in range(1, n_forecast + 1)]
sims_med = np.median(sims, axis=2) # N by predictors
sims_q25 = np.percentile(sims, 25, axis=2)
sims_q75 = np.percentile(sims, 75, axis=2)
# forecast microdistancing
# Get a baseline value of microdistancing
mu_overall = np.mean(prop[state].values[-n_baseline:])
md_diffs = np.diff(prop[state].values[-n_training:])
mu_diffs = np.mean(md_diffs)
std_diffs = np.std(md_diffs)
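# mu_diffs and std_diffs are the drift and noise of the day-to-day random walk used to
# extend the micro-distancing proportion beyond the last survey observation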
extra_days_md = (
pd.to_datetime(df_google.date.values[-1])
- pd.to_datetime(prop[state].index.values[-1])
).days
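# extra_days_md is the gap between the last Google mobility date and the last survey
# observation, so the micro-distancing forecast also has to cover that gap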
# Set all values to current value.
current = [prop[state].values[-1]] * mob_samples
new_md_forecast = []
# Forecast mobility forward sequentially by day.
for i in range(n_forecast + extra_days_md):
# SCENARIO MODELLING
# This code chunk will allow you to manually set the distancing params for a state to allow for modelling.
if scenarios[state] == "":
# Proportion of trend_force to regression_to_baseline_force
p_force = (n_forecast + extra_days_md - i) / (n_forecast + extra_days_md)
# Generate step realisations in training trend direction
trend_force = np.random.normal(mu_diffs, std_diffs, size=mob_samples)
# Generate realisations that draw closer to baseline
regression_to_baseline_force = np.random.normal(
0.05 * (mu_overall - current), std_diffs
)
current = (
current
+ p_force * trend_force
+ (1 - p_force) * regression_to_baseline_force
) # Balance forces
# current = current+p_force*trend_force # Balance forces
elif scenarios[state] != "":
current = np.array(current)
# Make baseline cov for generating points
std_baseline = np.std(prop[state].values[-42:-28])
mu_baseline = np.mean(prop[state].values[-42:-28], axis=0)
mu_current = prop[state].values[-1]
if scenario_dates[state] != "":
scenario_change_point = (
pd.to_datetime(scenario_dates[state]) - data_date
).days + extra_days_md
# Constant Lockdown
if (
scenarios[state] == "no_reversion"
or scenarios[state] == "school_opening_2022"
):
# use only more recent data to forecast under a no-reversion scenario
# std_lockdown = np.std(prop[state].values[-24:-4])
# current = np.random.normal(mu_current, std_lockdown)
current = np.random.normal(mu_current, std_baseline)
# No Lockdown
elif scenarios[state] == "full_reversion":
if i < scenario_change_point:
current = np.random.normal(mu_current, std_baseline)
else:
mu_baseline_0 = 0.2
# Proportion of trend_force to regression_to_baseline_force
p_force = (n_forecast + extra_days_md - i) / (
n_forecast + extra_days_md
)
# take a mean of the differences over the last 2 weeks
mu_diffs = np.mean(np.diff(prop[state].values[-14:]))
# Generate step realisations in training trend direction
trend_force = np.random.normal(mu_diffs, std_baseline)
# Generate realisations that draw closer to baseline
regression_to_baseline_force = np.random.normal(
0.005 * (mu_baseline_0 - current), std_baseline
)
current = current + regression_to_baseline_force # Balance forces
elif scenarios[state] == "immediately_baseline":
# this scenario is an immediate return to baseline values
if i < scenario_change_point:
current = np.random.normal(mu_current, std_baseline)
else:
mu_baseline_0 = 0.2
# jump immediately to baseline
current = np.random.normal(mu_baseline_0, std_baseline)
# Temporary Lockdown
elif scenarios[state] == "half_reversion": # No Lockdown
if i < scenario_change_point:
current = np.random.normal(mu_current, std_baseline)
else:
# Revert to values halfway between the before and after
current = np.random.normal(
(mu_current + mu_baseline) / 2, std_baseline
)
new_md_forecast.append(current)
md_sims = np.vstack(new_md_forecast) # Put forecast days together
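# clip the simulated proportions to the valid [0, 1] range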
md_sims = np.minimum(1, md_sims)
md_sims = np.maximum(0, md_sims)
dd_md = [
prop[state].index[-1] + timedelta(days=x)
for x in range(1, n_forecast + extra_days_md + 1)
]
## currently not forecasting masks — may return in the future but will need to assess.
# forecast mask wearing compliance
# Get a baseline value of microdistancing
mu_overall = np.mean(masks[state].values[-n_baseline:])
md_diffs = np.diff(masks[state].values[-n_training:])
mu_diffs = np.mean(md_diffs)
std_diffs = np.std(md_diffs)
extra_days_masks = (
pd.to_datetime(df_google.date.values[-1])
- pd.to_datetime(masks[state].index.values[-1])
).days
# Set all values to current value.
current = [masks[state].values[-1]] * mob_samples
new_masks_forecast = []
# Forecast mobility forward sequentially by day.
for i in range(n_forecast + extra_days_masks):
# SCENARIO MODELLING
# This code chunk will allow you to manually set the distancing params for a state to allow for modelling.
if scenarios[state] == "":
# Proportion of trend_force to regression_to_baseline_force
p_force = (n_forecast + extra_days_masks - i) / (
n_forecast + extra_days_masks
)
# Generate step realisations in training trend direction
trend_force = np.random.normal(mu_diffs, std_diffs, size=mob_samples)
# Generate realisations that draw closer to baseline
# regression_to_baseline_force = np.random.normal(0.05*(mu_overall - current), std_diffs)
# current = current + p_force*trend_force + (1-p_force)*regression_to_baseline_force # Balance forces
current = current + trend_force
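# note: unlike micro-distancing, the regression-to-baseline force is left commented out here,
# so mask wearing follows the recent trend only in the standard scenario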
elif scenarios[state] != "":
current = np.array(current)
# Make baseline cov for generating points
std_baseline = np.std(masks[state].values[-42:-28])
mu_baseline = np.mean(masks[state].values[-42:-28], axis=0)
mu_current = masks[state].values[-1]
if scenario_dates[state] != "":
scenario_change_point = (
pd.to_datetime(scenario_dates[state]) - data_date
).days + extra_days_masks
# Constant Lockdown
if (
scenarios[state] == "no_reversion"
or scenarios[state] == "school_opening_2022"
):
# use only more recent data to forecast under a no-reversion scenario
# std_lockdown = np.std(masks[state].values[-24:-4])
# current = np.random.normal(mu_current, std_lockdown)
current = np.random.normal(mu_current, std_baseline)
# No Lockdown
elif scenarios[state] == "full_reversion":
if i < scenario_change_point:
current = np.random.normal(mu_current, std_baseline)
else:
mu_baseline_0 = 0.2
# Proportion of trend_force to regression_to_baseline_force
p_force = (n_forecast + extra_days_masks - i) / (
n_forecast + extra_days_masks
)
# take a mean of the differences over the last 2 weeks
mu_diffs = np.mean(np.diff(masks[state].values[-14:]))
# Generate step realisations in training trend direction
trend_force = np.random.normal(mu_diffs, std_baseline)
# Generate realisations that draw closer to baseline
regression_to_baseline_force = np.random.normal(
0.005 * (mu_baseline_0 - current), std_baseline
)
current = current + regression_to_baseline_force # Balance forces
elif scenarios[state] == "immediately_baseline":
# this scenario is an immediate return to baseline values
if i < scenario_change_point:
current = np.random.normal(mu_current, std_baseline)
else:
mu_baseline_0 = 0.2
# jump immediately to baseline
current = np.random.normal(mu_baseline_0, std_baseline)
# Temporary Lockdown
elif scenarios[state] == "half_reversion": # No Lockdown
if i < scenario_change_point:
current = np.random.normal(mu_current, std_baseline)
else:
# Revert to values halfway between the before and after
current = np.random.normal(
(mu_current + mu_baseline) / 2, std_baseline
)
new_masks_forecast.append(current)
masks_sims = np.vstack(new_masks_forecast) # Put forecast days together
masks_sims = np.minimum(1, masks_sims)
masks_sims = np.maximum(0, masks_sims)
dd_masks = [
masks[state].index[-1] + timedelta(days=x)
for x in range(1, n_forecast + extra_days_masks + 1)
]
# Forecasting vaccine effect
# if state == "WA":
# last_fit_date = pd.to_datetime(third_end_date)
# else:
last_fit_date = pd.to_datetime(third_date_range[state][-1])
extra_days_vacc = (pd.to_datetime(df_google.date.values[-1]) - last_fit_date).days
total_forecasting_days = n_forecast + extra_days_vacc
# get the VE on the last day
mean_delta = vaccination_by_state_delta.loc[state][last_fit_date + timedelta(1)]
mean_omicron = vaccination_by_state_omicron.loc[state][last_fit_date + timedelta(1)]
current = np.zeros_like(mob_samples)
new_delta = []
new_omicron = []
# variance on the vaccine forecasts is equivalent to what we use in the fitting
var_vax = 0.00005
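# a_vax and b_vax below are method-of-moments Beta parameters: for a Beta(a, b) with mean m
# and variance v, a = m * (m * (1 - m) / v - 1) and b = (1 - m) * (m * (1 - m) / v - 1),
# so each day's sampled VE keeps the forecast mean while adding var_vax worth of noise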
a_vax = np.zeros_like(mob_samples)
b_vax = np.zeros_like(mob_samples)
for d in pd.date_range(
last_fit_date + timedelta(1),
pd.to_datetime(today) + timedelta(days=num_forecast_days),
):
mean_delta = vaccination_by_state_delta.loc[state][d]
a_vax = mean_delta * (mean_delta * (1 - mean_delta) / var_vax - 1)
b_vax = (1 - mean_delta) * (mean_delta * (1 - mean_delta) / var_vax - 1)
current = np.random.beta(a_vax, b_vax, mob_samples)
new_delta.append(current.tolist())
mean_omicron = vaccination_by_state_omicron.loc[state][d]
a_vax = mean_omicron * (mean_omicron * (1 - mean_omicron) / var_vax - 1)
b_vax = (1 - mean_omicron) * (mean_omicron * (1 - mean_omicron) / var_vax - 1)
current = np.random.beta(a_vax, b_vax, mob_samples)
new_omicron.append(current.tolist())
vacc_sims_delta = np.vstack(new_delta)
vacc_sims_omicron = np.vstack(new_omicron)
dd_vacc = [
last_fit_date + timedelta(days=x)
for x in range(1, n_forecast + extra_days_vacc + 1)
]
for j, var in enumerate(
predictors
+ ["md_prop"]
+ ["masks_prop"]
+ ["vaccination_delta"]
+ ["vaccination_omicron"]
):
# Record data
axs = axes[j]
if (state == "AUS") and (var == "md_prop"):
continue
if var == "md_prop":
outdata["type"].extend([var] * len(dd_md))
outdata["state"].extend([state] * len(dd_md))
outdata["date"].extend([d.strftime("%Y-%m-%d") for d in dd_md])
outdata["mean"].extend(np.mean(md_sims, axis=1))
outdata["std"].extend(np.std(md_sims, axis=1))
elif var == "masks_prop":
outdata["type"].extend([var] * len(dd_masks))
outdata["state"].extend([state] * len(dd_masks))
outdata["date"].extend([d.strftime("%Y-%m-%d") for d in dd_masks])
outdata["mean"].extend(np.mean(masks_sims, axis=1))
outdata["std"].extend(np.std(masks_sims, axis=1))
elif var == "vaccination_delta":
outdata["type"].extend([var] * len(dd_vacc))
outdata["state"].extend([state] * len(dd_vacc))
outdata["date"].extend([d.strftime("%Y-%m-%d") for d in dd_vacc])
outdata["mean"].extend(np.mean(vacc_sims_delta, axis=1))
outdata["std"].extend(np.std(vacc_sims_delta, axis=1))
elif var == "vaccination_omicron":
outdata["type"].extend([var] * len(dd_vacc))
outdata["state"].extend([state] * len(dd_vacc))
outdata["date"].extend([d.strftime("%Y-%m-%d") for d in dd_vacc])
outdata["mean"].extend(np.mean(vacc_sims_omicron, axis=1))
outdata["std"].extend(np.std(vacc_sims_omicron, axis=1))
else:
outdata["type"].extend([var] * len(dd))
outdata["state"].extend([state] * len(dd))
outdata["date"].extend([d.strftime("%Y-%m-%d") for d in dd])
outdata["mean"].extend(np.mean(sims[:, j, :], axis=1))
outdata["std"].extend(np.std(sims[:, j, :], axis=1))
if state in plot_states:
if var == "md_prop":
# md plot
axs[rownum, colnum].plot(prop[state].index, prop[state].values, lw=1)
axs[rownum, colnum].plot(dd_md, np.median(md_sims, axis=1), "k", lw=1)
axs[rownum, colnum].fill_between(
dd_md,
np.quantile(md_sims, 0.25, axis=1),
np.quantile(md_sims, 0.75, axis=1),
color="k",
alpha=0.1,
)
elif var == "masks_prop":
# masks plot
axs[rownum, colnum].plot(masks[state].index, masks[state].values, lw=1)
axs[rownum, colnum].plot(
dd_masks, np.median(masks_sims, axis=1), "k", lw=1
)
axs[rownum, colnum].fill_between(
dd_masks,
np.quantile(masks_sims, 0.25, axis=1),
np.quantile(masks_sims, 0.75, axis=1),
color="k",
alpha=0.1,
)
elif var == "vaccination_delta":
# vaccination plot
axs[rownum, colnum].plot(
vaccination_by_state_delta.loc[
state, ~vaccination_by_state_delta.loc[state].isna()
].index,
vaccination_by_state_delta.loc[
state, ~vaccination_by_state_delta.loc[state].isna()
].values,
lw=1,
)
axs[rownum, colnum].plot(
dd_vacc, np.median(vacc_sims_delta, axis=1), color="C1", lw=1
)
axs[rownum, colnum].fill_between(
dd_vacc,
np.quantile(vacc_sims_delta, 0.25, axis=1),
np.quantile(vacc_sims_delta, 0.75, axis=1),
color="C1",
alpha=0.1,
)
elif var == "vaccination_omicron":
# vaccination plot
axs[rownum, colnum].plot(
vaccination_by_state_omicron.loc[
state, ~vaccination_by_state_omicron.loc[state].isna()
].index,
vaccination_by_state_omicron.loc[
state, ~vaccination_by_state_omicron.loc[state].isna()
].values,
lw=1,
)
axs[rownum, colnum].plot(
dd_vacc, np.median(vacc_sims_omicron, axis=1), color="C1", lw=1
)
axs[rownum, colnum].fill_between(
dd_vacc,
np.quantile(vacc_sims_omicron, 0.25, axis=1),
np.quantile(vacc_sims_omicron, 0.75, axis=1),
color="C1",
alpha=0.1,
)
else:
# all other predictors
axs[rownum, colnum].plot(
dates, df_google[df_google["state"] == state][var].values, lw=1
)
axs[rownum, colnum].fill_between(
dates,
np.percentile(Rmed_array[:, j, :], 25, axis=1),
np.percentile(Rmed_array[:, j, :], 75, axis=1),
alpha=0.5,
)
axs[rownum, colnum].plot(dd, sims_med[:, j], color="C1", lw=1)
axs[rownum, colnum].fill_between(
dd, sims_q25[:, j], sims_q75[:, j], color="C1", alpha=0.1
)
# axs[rownum,colnum].axvline(dd[-num_forecast_days], ls = '--', color = 'black', lw=1) # plotting a vertical line at the end of the data date
# axs[rownum,colnum].axvline(dd[-(num_forecast_days+truncation_days)], ls = '-.', color='grey', lw=1) # plotting a vertical line at the forecast date
axs[rownum, colnum].set_title(state)
# plotting horizontal line at 1
axs[rownum, colnum].axhline(1, ls="--", c="k", lw=1)
axs[rownum, colnum].set_title(state)
axs[rownum, colnum].tick_params("x", rotation=90)
axs[rownum, colnum].tick_params("both", labelsize=8)
# plot the start date of the data and indicators of the data we are actually fitting to (in grey)
axs[rownum, colnum].axvline(data_date, ls="-.", color="black", lw=1)
if j < len(predictors):
axs[rownum, colnum].set_ylabel(
predictors[j].replace("_", " ")[:-5], fontsize=7
)
elif var == "md_prop":
axs[rownum, colnum].set_ylabel(
"Proportion of respondents\n micro-distancing", fontsize=7
)
elif var == "masks_prop":
axs[rownum, colnum].set_ylabel(
"Proportion of respondents\n wearing masks", fontsize=7
)
elif var == "vaccination_delta" or var == "vaccination_omicron":
axs[rownum, colnum].set_ylabel(
"Reduction in TP \n from vaccination", fontsize=7
)
# historically we want to store the higher variance mobilities
state_Rmed[state] = Rmed_array
state_sims[state] = sims
os.makedirs(
"figs/mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts",
exist_ok=True,
)
for i, fig in enumerate(figs):
fig.text(0.5, 0.02, "Date", ha="center", va="center", fontsize=15)
if i < len(predictors): # this plots the google mobility forecasts
fig.tight_layout()
fig.savefig(
"figs/mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts/"
+ str(predictors[i])
+ ".png",
dpi=400,
)
elif i == len(predictors): # this plots the microdistancing forecasts
fig.tight_layout()
fig.savefig(
"figs/mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts/micro_dist.png",
dpi=400,
)
elif i == len(predictors) + 1: # this plots the microdistancing forecasts
fig.tight_layout()
fig.savefig(
"figs/mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts/mask_wearing.png",
dpi=400,
)
elif i == len(predictors) + 2: # finally this plots the delta VE forecasts
fig.tight_layout()
fig.savefig(
"figs/mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts/delta_vaccination.png",
dpi=400,
)
else: # finally this plots the omicron VE forecasts
fig.tight_layout()
fig.savefig(
"figs/mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts/omicron_vaccination.png",
dpi=400,
)
df_out = pd.DataFrame.from_dict(outdata)
df_md = df_out.loc[df_out.type == "md_prop"]
df_masks = df_out.loc[df_out.type == "masks_prop"]
df_out = df_out.loc[df_out.type != "vaccination_delta"]
df_out = df_out.loc[df_out.type != "vaccination_omicron"]
df_out = df_out.loc[df_out.type != "md_prop"]
df_out = df_out.loc[df_out.type != "masks_prop"]
df_forecast = pd.pivot_table(
df_out, columns=["type"], index=["date", "state"], values=["mean"]
)
df_std = pd.pivot_table(
df_out, columns=["type"], index=["date", "state"], values=["std"]
)
df_forecast_md = pd.pivot_table(
df_md, columns=["state"], index=["date"], values=["mean"]
)
df_forecast_md_std = pd.pivot_table(
df_md, columns=["state"], index=["date"], values=["std"]
)
df_forecast_masks = pd.pivot_table(
df_masks, columns=["state"], index=["date"], values=["mean"]
)
df_forecast_masks_std = pd.pivot_table(
df_masks, columns=["state"], index=["date"], values=["std"]
)
# align with google order in columns
df_forecast = df_forecast.reindex([("mean", val) for val in predictors], axis=1)
df_std = df_std.reindex([("std", val) for val in predictors], axis=1)
df_forecast.columns = predictors # remove the tuple name of columns
df_std.columns = predictors
df_forecast = df_forecast.reset_index()
df_std = df_std.reset_index()
df_forecast.date = pd.to_datetime(df_forecast.date)
df_std.date = pd.to_datetime(df_std.date)
df_forecast_md = df_forecast_md.reindex([("mean", state) for state in states], axis=1)
df_forecast_md_std = df_forecast_md_std.reindex(
[("std", state) for state in states], axis=1
)
df_forecast_md.columns = states
df_forecast_md_std.columns = states
df_forecast_md = df_forecast_md.reset_index()
df_forecast_md_std = df_forecast_md_std.reset_index()
df_forecast_md.date = pd.to_datetime(df_forecast_md.date)
df_forecast_md_std.date = pd.to_datetime(df_forecast_md_std.date)
df_forecast_masks = df_forecast_masks.reindex(
[("mean", state) for state in states], axis=1
)
df_forecast_masks_std = df_forecast_masks_std.reindex(
[("std", state) for state in states], axis=1
)
df_forecast_masks.columns = states
df_forecast_masks_std.columns = states
df_forecast_masks = df_forecast_masks.reset_index()
df_forecast_masks_std = df_forecast_masks_std.reset_index()
df_forecast_masks.date = pd.to_datetime(df_forecast_masks.date)
df_forecast_masks_std.date = pd.to_datetime(df_forecast_masks_std.date)
df_R = df_google[["date", "state"] + mov_values + [val + "_std" for val in mov_values]]
df_R = pd.concat([df_R, df_forecast], ignore_index=True, sort=False)
df_R["policy"] = (df_R.date >= "2020-03-20").astype("int8")
df_md = pd.concat([prop, df_forecast_md.set_index("date")])
df_masks = pd.concat([masks, df_forecast_masks.set_index("date")])
# now we read in the ve time series and create an adjusted timeseries from March 1st
# that includes no effect prior
vaccination_by_state = pd.read_csv(
results_dir + "adjusted_vaccine_ts_delta" + data_date.strftime("%Y-%m-%d") + ".csv",
parse_dates=["date"],
)
# there are a couple of NAs early on in the time series, but this is likely due to slightly different start dates
vaccination_by_state.fillna(1, inplace=True)
vaccination_by_state = vaccination_by_state[["state", "date", "effect"]]
vaccination_by_state = vaccination_by_state.pivot(
index="state", columns="date", values="effect"
) # Convert to matrix form
# initialise a complete dataframe which will be the full VE timeseries plus the forecasted VE
df_ve_delta = pd.DataFrame()
# loop over states and get the offset components of the full VE
before_vacc_dates = pd.date_range(
start_date, vaccination_by_state.columns[0] - timedelta(days=1), freq="d"
)
# this is just a single row of ones spanning all the pre-vaccination dates (as columns); it is reused for each of the 8 jurisdictions in the loop below
before_vacc_Reff_reduction = pd.DataFrame(np.ones(((1, len(before_vacc_dates)))))
before_vacc_Reff_reduction.columns = before_vacc_dates
for state in states:
before_vacc_Reff_reduction.index = [state]
# merge the vaccine data and the 1's dataframes
df_ve_delta[state] = pd.concat(
[before_vacc_Reff_reduction.loc[state].T, vaccination_by_state.loc[state].T]
)
# clip off extra days
df_ve_delta = df_ve_delta[
df_ve_delta.index <= pd.to_datetime(today) + timedelta(days=num_forecast_days)
]
# save the forecasted vaccination line
df_ve_delta.to_csv(
results_dir
+ "forecasted_vaccination_delta"
+ data_date.strftime("%Y-%m-%d")
+ ".csv"
)
vaccination_by_state = pd.read_csv(
results_dir
+ "adjusted_vaccine_ts_omicron"
+ data_date.strftime("%Y-%m-%d")
+ ".csv",
parse_dates=["date"],
)
# there are a couple of NAs early on in the time series, but this is likely due to slightly different start dates
vaccination_by_state.fillna(1, inplace=True)
vaccination_by_state = vaccination_by_state[["state", "date", "effect"]]
vaccination_by_state = vaccination_by_state.pivot(
index="state", columns="date", values="effect"
) # Convert to matrix form
# initialise a complete dataframe which will be the full VE timeseries plus the forecasted VE
df_ve_omicron = pd.DataFrame()
# loop over states and get the offset components of the full VE
before_vacc_dates = pd.date_range(
start_date, pd.to_datetime(omicron_start_date) - timedelta(days=1), freq="d"
)
# this is just a single row of ones spanning all the pre-vaccination dates (as columns); it is reused for each of the 8 jurisdictions in the loop below
before_vacc_Reff_reduction = pd.DataFrame(np.ones(((1, len(before_vacc_dates)))))
before_vacc_Reff_reduction.columns = before_vacc_dates
for state in states:
before_vacc_Reff_reduction.index = [state]
# merge the vaccine data and the 1's dataframes
df_ve_omicron[state] = pd.concat(
[
before_vacc_Reff_reduction.loc[state].T,
vaccination_by_state.loc[state][
vaccination_by_state.loc[state].index
>= pd.to_datetime(omicron_start_date)
],
]
)
df_ve_omicron = df_ve_omicron[
df_ve_omicron.index <= pd.to_datetime(today) + timedelta(days=num_forecast_days)
]
# save the forecasted vaccination line
df_ve_omicron.to_csv(
results_dir
+ "forecasted_vaccination_omicron"
+ data_date.strftime("%Y-%m-%d")
+ ".csv"
)
print("============")
print("Plotting forecasted estimates")
print("============")
expo_decay = True
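# with expo_decay the micro-distancing multiplier is (1 + theta_md) ** (-prop), equal to 1 when
# the surveyed proportion is 0 and decreasing as the proportion grows (for positive theta_md)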
theta_md = np.tile(df_samples["theta_md"].values, (df_md["NSW"].shape[0], 1))
fig, ax = plt.subplots(figsize=(12, 9), nrows=4, ncols=2, sharex=True, sharey=True)
for i, state in enumerate(plot_states):
# np.random.normal(df_md[state].values, df_md_std.values)
prop_sim = df_md[state].values
if expo_decay:
md = ((1 + theta_md).T ** (-1 * prop_sim)).T
else:
md = 2 * expit(-1 * theta_md * prop_sim[:, np.newaxis])
row = i // 2
col = i % 2
ax[row, col].plot(
df_md[state].index, np.median(md, axis=1), label="Microdistancing"
)
ax[row, col].fill_between(
df_md[state].index,
np.quantile(md, 0.25, axis=1),
np.quantile(md, 0.75, axis=1),
label="Microdistancing",
alpha=0.4,
color="C0",
)
ax[row, col].fill_between(
df_md[state].index,
np.quantile(md, 0.05, axis=1),
np.quantile(md, 0.95, axis=1),
label="Microdistancing",
alpha=0.4,
color="C0",
)
ax[row, col].set_title(state)
ax[row, col].tick_params("x", rotation=45)
ax[row, col].set_xticks(
[df_md[state].index.values[-n_forecast - extra_days_md]],
minor=True,
)
ax[row, col].xaxis.grid(which="minor", linestyle="-.", color="grey", linewidth=1)
fig.text(
0.03,
0.5,
"Multiplicative effect \n of micro-distancing $M_d$",
ha="center",
va="center",
rotation="vertical",
fontsize=20,
)
fig.text(0.5, 0.04, "Date", ha="center", va="center", fontsize=20)
plt.tight_layout(rect=[0.05, 0.04, 1, 1])
fig.savefig(
"figs/"
+ "mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts/md_factor.png",
dpi=144,
)
theta_masks = np.tile(df_samples["theta_masks"].values, (df_masks["NSW"].shape[0], 1))
fig, ax = plt.subplots(figsize=(12, 9), nrows=4, ncols=2, sharex=True, sharey=True)
for i, state in enumerate(plot_states):
# np.random.normal(df_md[state].values, df_md_std.values)
masks_prop_sim = df_masks[state].values
if expo_decay:
mask_wearing_factor = ((1 + theta_masks).T ** (-1 * masks_prop_sim)).T
else:
mask_wearing_factor = 2 * expit(
-1 * theta_masks * masks_prop_sim[:, np.newaxis]
)
row = i // 2
col = i % 2
ax[row, col].plot(
df_masks[state].index,
np.median(mask_wearing_factor, axis=1),
label="Microdistancing",
)
ax[row, col].fill_between(
df_masks[state].index,
np.quantile(mask_wearing_factor, 0.25, axis=1),
np.quantile(mask_wearing_factor, 0.75, axis=1),
label="Microdistancing",
alpha=0.4,
color="C0",
)
ax[row, col].fill_between(
df_masks[state].index,
np.quantile(mask_wearing_factor, 0.05, axis=1),
np.quantile(mask_wearing_factor, 0.95, axis=1),
label="Microdistancing",
alpha=0.4,
color="C0",
)
ax[row, col].set_title(state)
ax[row, col].tick_params("x", rotation=45)
ax[row, col].set_xticks(
[df_masks[state].index.values[-n_forecast - extra_days_masks]], minor=True
)
ax[row, col].xaxis.grid(which="minor", linestyle="-.", color="grey", linewidth=1)
fig.text(
0.03,
0.5,
"Multiplicative effect \n of mask-wearing $M_d$",
ha="center",
va="center",
rotation="vertical",
fontsize=20,
)
fig.text(0.5, 0.04, "Date", ha="center", va="center", fontsize=20)
plt.tight_layout(rect=[0.05, 0.04, 1, 1])
fig.savefig(
"figs/"
+ "mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts/mask_wearing_factor.png",
dpi=144,
)
df_R = df_R.sort_values("date")
# samples = df_samples.sample(n_samples) # test on sample of 2
# keep all samples
samples = df_samples.iloc[:mob_samples, :]
# for strain in ("Delta", "Omicron"):
# samples = df_samples
# flags for advanced scenario modelling
advanced_scenario_modelling = False
save_for_SA = False
# since this can be useful, predictor ordering is:
# ['retail_and_recreation_7days', 'grocery_and_pharmacy_7days', 'parks_7days', 'transit_stations_7days', 'workplaces_7days']
typ = "R_L"
forecast_type = ["R_L"]
for strain in ("Delta", "Omicron"):
print("============")
print("Calculating", strain, "TP")
print("============")
state_Rs = {
"state": [],
"date": [],
"type": [],
"median": [],
"lower": [],
"upper": [],
"bottom": [],
"top": [],
"mean": [],
"std": [],
}
ban = "2020-03-20"
# VIC and NSW allow gatherings of up to 20 people, other jurisdictions allow for
new_pol = "2020-06-01"
expo_decay = True
# start and end date for the third wave
# Subtract 10 days to avoid right truncation
third_end_date = data_date - pd.Timedelta(days=truncation_days)
typ_state_R = {}
mob_forecast_date = df_forecast.date.min()
state_key = {
"ACT": "1",
"NSW": "2",
"NT": "3",
"QLD": "4",
"SA": "5",
"TAS": "6",
"VIC": "7",
"WA": "8",
}
total_N_p_third_omicron = 0
for v in third_date_range.values():
tmp = sum(v >= pd.to_datetime(omicron_start_date))
# add a plus one for inclusion of end date (the else 0 is due to QLD having no Omicron potential)
total_N_p_third_omicron += tmp if tmp > 0 else 0
state_R = {}
for (kk, state) in enumerate(states):
# sort df_R by date so that rows are dates. rows are dates, columns are predictors
df_state = df_R.loc[df_R.state == state]
dd = df_state.date
post_values = samples[predictors].values.T
prop_sim = df_md[state].values
# grab vaccination data
vacc_ts_delta = df_ve_delta[state]
vacc_ts_omicron = df_ve_omicron[state]
# take right size of md to be N by N
theta_md = np.tile(samples["theta_md"].values, (df_state.shape[0], 1))
theta_masks = np.tile(samples["theta_masks"].values, (df_state.shape[0], 1))
r = samples["r[" + str(kk + 1) + "]"].values
tau = samples["tau[" + str(kk + 1) + "]"].values
m0 = samples["m0[" + str(kk + 1) + "]"].values
m1 = samples["m1[" + str(kk + 1) + "]"].values
# m1 = 1.0
md = ((1 + theta_md).T ** (-1 * prop_sim)).T
masks_prop_sim = df_masks[state].values
masks = ((1 + theta_masks).T ** (-1 * masks_prop_sim)).T
third_states_indices = {
state: index + 1 for (index, state) in enumerate(third_states)
}
third_days = {k: v.shape[0] for (k, v) in third_date_range.items()}
third_days_cumulative = np.append(
[0], np.cumsum([v for v in third_days.values()])
)
vax_idx_ranges = {
k: range(third_days_cumulative[i], third_days_cumulative[i + 1])
for (i, k) in enumerate(third_days.keys())
}
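# vax_idx_ranges maps each state to its contiguous block of rows in the stacked posterior
# VE vector, so the fitted effects can be sliced out per jurisdiction below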
third_days_tot = sum(v for v in third_days.values())
# get the sampled vaccination effect (this will be incomplete as it's only over the fitting period)
sampled_vax_effects_all = samples[
["ve_delta[" + str(j + 1) + "]" for j in range(third_days_tot)]
].T
vacc_tmp = sampled_vax_effects_all.iloc[vax_idx_ranges[state], :]
# now we layer in the posterior vaccine multiplier effect which will be a (T, mob_samples) array
# get before and after fitting and tile them
vacc_ts_data_before = pd.concat(
[vacc_ts_delta.loc[vacc_ts_delta.index < third_date_range[state][0]]]
* mob_samples,
axis=1,
).to_numpy()
vacc_ts_data_after = pd.concat(
[vacc_ts_delta.loc[vacc_ts_delta.index > third_date_range[state][-1]]]
* mob_samples,
axis=1,
).to_numpy()
# merge in order
vacc_ts_delta = np.vstack(
[vacc_ts_data_before, vacc_tmp, vacc_ts_data_after]
)
# construct a range of dates for omicron which starts at the maximum of the start date for that state or the Omicron start date
third_omicron_date_range = {
k: pd.date_range(
start=max(v[0], | pd.to_datetime(omicron_start_date) | pandas.to_datetime |
# import pandas
import pandas as pd
import numpy as np
import statsmodels.api as sm
| pd.set_option('display.width', 200) | pandas.set_option |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 26 15:39:02 2018
@author: joyce
"""
import pandas as pd
import numpy as np
from numpy.matlib import repmat
from stats import get_stockdata_from_sql,get_tradedate,Corr,Delta,Rank,Cross_max,\
Cross_min,Delay,Sum,Mean,STD,TsRank,TsMax,TsMin,DecayLinear,Count,SMA,Cov,DTM,DBM,\
Highday,Lowday,HD,LD,RegBeta,RegResi,SUMIF,get_indexdata_from_sql,timer,get_fama
class stAlpha(object):
def __init__(self,begin,end):
self.begin = begin
self.end = end
self.close = get_stockdata_from_sql(1,self.begin,self.end,'Close')
self.open = get_stockdata_from_sql(1,self.begin,self.end,'Open')
self.high = get_stockdata_from_sql(1,self.begin,self.end,'High')
self.low = get_stockdata_from_sql(1,self.begin,self.end,'Low')
self.volume = get_stockdata_from_sql(1,self.begin,self.end,'Vol')
self.amt = get_stockdata_from_sql(1,self.begin,self.end,'Amount')
self.vwap = get_stockdata_from_sql(1,self.begin,self.end,'Vwap')
self.ret = get_stockdata_from_sql(1,begin,end,'Pctchg')
self.close_index = get_indexdata_from_sql(1,begin,end,'close','000001.SH')
self.open_index = get_indexdata_from_sql(1,begin,end,'open','000001.SH')
# self.mkt = get_fama_from_sql()
@timer
def alpha1(self):
volume = self.volume
ln_volume = np.log(volume)
ln_volume_delta = Delta(ln_volume,1)
close = self.close
Open = self.open
price_temp = pd.concat([close,Open],axis = 1,join = 'outer')
price_temp['ret'] = (price_temp['Close'] - price_temp['Open'])/price_temp['Open']
del price_temp['Close'],price_temp['Open']
r_ln_volume_delta = Rank(ln_volume_delta)
r_ret = Rank(price_temp)
rank = pd.concat([r_ln_volume_delta,r_ret],axis = 1,join = 'inner')
rank.columns = ['r1','r2']
corr = Corr(rank,6)
alpha = corr
alpha.columns = ['alpha1']
return alpha
@timer
def alpha2(self):
close = self.close
low = self.low
high = self.high
temp = pd.concat([close,low,high],axis = 1,join = 'outer')
temp['alpha'] = (2 * temp['Close'] - temp['Low'] - temp['High']) \
/ (temp['High'] - temp['Low'])
del temp['Close'],temp['Low'],temp['High']
alpha = -1 * Delta(temp,1)
alpha.columns = ['alpha2']
return alpha
@timer
def alpha3(self):
close = self.close
low = self.low
high = self.high
temp = pd.concat([close,low,high],axis = 1,join = 'outer')
close_delay = Delay(pd.DataFrame(temp['Close']),1)
close_delay.columns = ['close_delay']
temp = pd.concat([temp,close_delay],axis = 1,join = 'inner')
temp['min'] = Cross_max(pd.DataFrame(temp['close_delay']),pd.DataFrame(temp['Low']))
temp['max'] = Cross_min(pd.DataFrame(temp['close_delay']),pd.DataFrame(temp['High']))
temp['alpha_temp'] = 0
temp['alpha_temp'][temp['Close'] > temp['close_delay']] = temp['Close'] - temp['min']
temp['alpha_temp'][temp['Close'] < temp['close_delay']] = temp['Close'] - temp['max']
alpha = Sum(pd.DataFrame(temp['alpha_temp']),6)
alpha.columns = ['alpha3']
return alpha
@timer
def alpha4(self):
close = self.close
volume = self.volume
close_mean_2 = Mean(close,2)
close_mean_8 = Mean(close,8)
close_std = STD(close,8)
volume_mean_20 = Mean(volume,20)
data = pd.concat([close_mean_2,close_mean_8,close_std,volume_mean_20,volume],axis = 1,join = 'inner')
data.columns = ['close_mean_2','close_mean_8','close_std','volume_mean_20','volume']
data['alpha'] = -1
data['alpha'][data['close_mean_2'] < data['close_mean_8'] - data['close_std']] = 1
data['alpha'][data['volume']/data['volume_mean_20'] >= 1] = 1
alpha = pd.DataFrame(data['alpha'])
alpha.columns = ['alpha4']
return alpha
@timer
def alpha5(self):
volume = self.volume
high = self.high
r1 = TsRank(volume,5)
r2 = TsRank(high,5)
rank = pd.concat([r1,r2],axis = 1,join = 'inner')
rank.columns = ['r1','r2']
corr = Corr(rank,5)
alpha = -1 * TsMax(corr,5)
alpha.columns = ['alpha5']
return alpha
@timer
def alpha6(self):
Open = self.open
high = self.high
df = pd.concat([Open,high],axis = 1,join = 'inner')
df['price'] = df['Open'] * 0.85 + df['High'] * 0.15
df_delta = Delta(pd.DataFrame(df['price']),1)
alpha = Rank(np.sign(df_delta))
alpha.columns = ['alpha6']
return alpha
@timer
def alpha7(self):
close = self.close
vwap = self.vwap
volume = self.volume
volume_delta = Delta(volume,3)
data = pd.concat([close,vwap],axis = 1,join = 'inner')
data['diff'] = data['Vwap'] - data['Close']
r1 = Rank(TsMax(pd.DataFrame(data['diff']),3))
r2 = Rank(TsMin(pd.DataFrame(data['diff']),3))
r3 = Rank(volume_delta)
rank = pd.concat([r1,r2,r3],axis = 1,join = 'inner')
rank.columns = ['r1','r2','r3']
alpha = (rank['r1'] + rank['r2'])* rank['r3']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha7']
return alpha
@timer
def alpha8(self):
high = self.high
low = self.low
vwap = self.vwap
data = pd.concat([high,low,vwap],axis = 1,join = 'inner')
data_price = (data['High'] + data['Low'])/2 * 0.2 + data['Vwap'] * 0.2
data_price_delta = Delta(pd.DataFrame(data_price),4) * -1
alpha = Rank(data_price_delta)
alpha.columns = ['alpha8']
return alpha
@timer
def alpha9(self):
high = self.high
low = self.low
volume = self.volume
data = pd.concat([high,low,volume],axis = 1,join = 'inner')
data['price']= (data['High'] + data['Low'])/2
data['price_delay'] = Delay(pd.DataFrame(data['price']),1)
alpha_temp = (data['price'] - data['price_delay']) * (data['High'] - data['Low'])/data['Vol']
alpha_temp_unstack = alpha_temp.unstack(level = 'ID')
alpha = alpha_temp_unstack.ewm(span = 7, ignore_na = True, min_periods = 7).mean()
alpha_final = alpha.stack()
alpha = pd.DataFrame(alpha_final)
alpha.columns = ['alpha9']
return alpha
@timer
def alpha10(self):
ret = self.ret
close = self.close
ret_std = STD(pd.DataFrame(ret),20)
ret_std.columns = ['ret_std']
data = pd.concat([ret,close,ret_std],axis = 1, join = 'inner')
temp1 = pd.DataFrame(data['ret_std'][data['Pctchg'] < 0])
temp2 = pd.DataFrame(data['Close'][data['Pctchg'] >= 0])
temp1.columns = ['temp']
temp2.columns = ['temp']
temp = pd.concat([temp1,temp2],axis = 0,join = 'outer')
temp_order = pd.concat([data,temp],axis = 1)
temp_square = pd.DataFrame(np.power(temp_order['temp'],2))
alpha_temp = TsMax(temp_square,5)
alpha = Rank(alpha_temp)
alpha.columns = ['alpha10']
return alpha
@timer
def alpha11(self):
high = self.high
low = self.low
close = self.close
volume = self.volume
data = pd.concat([high,low,close,volume],axis = 1,join = 'inner')
data_temp = ((data['Close'] - data['Low']) - (data['High'] - data['Close']))\
/(data['High'] - data['Low']) * data['Vol']
alpha = Sum(pd.DataFrame(data_temp),6)
alpha.columns = ['alpha11']
return alpha
@timer
def alpha12(self):
Open = self.open
vwap = self.vwap
close = self.close
data = pd.concat([Open,vwap,close],axis = 1, join = 'inner')
data['p1'] = data['Open'] - Mean(data['Open'],10)
data['p2'] = data['Close'] - data['Vwap']
r1 = Rank(pd.DataFrame(data['p1']))
r2 = Rank(pd.DataFrame(np.abs(data['p2'])))
rank = pd.concat([r1,r2],axis = 1,join = 'inner')
rank.columns = ['r1','r2']
alpha = rank['r1'] - rank['r2']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha12']
return alpha
@timer
def alpha13(self):
high = self.high
low = self.low
vwap = self.vwap
data = pd.concat([high,low,vwap],axis = 1,join = 'inner')
alpha = (data['High'] + data['Low'])/2 - data['Vwap']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha13']
return alpha
@timer
def alpha14(self):
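# difference between the current close and the close 5 days earlier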
close = self.close
close_delay = Delay(close,5)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
alpha = data['Close'] - data['close_delay']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha14']
return alpha
@timer
def alpha15(self):
Open = self.open
close = self.close
close_delay = Delay(close,1)
close_delay.columns = ['close_delay']
data = pd.concat([Open,close_delay],axis = 1,join = 'inner')
alpha = data['Open']/data['close_delay'] - 1
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha15']
return alpha
@timer
def alpha16(self):
vwap = self.vwap
volume = self.volume
data = pd.concat([vwap,volume],axis = 1, join = 'inner')
r1 = Rank(pd.DataFrame(data['Vol']))
r2 = Rank(pd.DataFrame(data['Vwap']))
rank = pd.concat([r1,r2],axis = 1, join = 'inner')
rank.columns = ['r1','r2']
corr = Corr(rank,5)
alpha = -1 * TsMax(Rank(corr),5)
alpha.columns = ['alpha16']
return alpha
@timer
def alpha17(self):
vwap = self.vwap
close = self.close
data = pd.concat([vwap,close],axis = 1, join = 'inner')
data['vwap_max15'] = TsMax(data['Vwap'],15)
data['close_delta5'] = Delta(data['Close'],5)
temp = np.power(data['vwap_max15'],data['close_delta5'])
alpha = Rank(pd.DataFrame(temp))
alpha.columns = ['alpha17']
return alpha
@timer
def alpha18(self):
"""
this one is similar with alpha14
"""
close = self.close
close_delay = Delay(close,5)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
alpha = data['Close']/data['close_delay']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha18']
return alpha
@timer
def alpha19(self):
close = self.close
close_delay = Delay(close,5)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
data['temp1'] = (data['Close'] - data['close_delay'])/data['close_delay']
data['temp2'] = (data['Close'] - data['close_delay'])/data['Close']
temp1 = pd.DataFrame(data['temp1'][data['Close'] < data['close_delay']])
temp2 = pd.DataFrame(data['temp2'][data['Close'] >= data['close_delay']])
temp1.columns = ['temp']
temp2.columns = ['temp']
temp = pd.concat([temp1,temp2],axis = 0)
data = pd.concat([data,temp],axis = 1,join = 'outer')
alpha = pd.DataFrame(data['temp'])
alpha.columns = ['alpha19']
return alpha
@timer
def alpha20(self):
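# 6-day return of the close: (close - close 6 days ago) / (close 6 days ago)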
close = self.close
close_delay = Delay(close,6)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
alpha = (data['Close'] - data['close_delay'])/data['close_delay']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha20']
return alpha
@timer
def alpha21(self):
close = self.close
close_mean = Mean(close,6)
alpha = RegBeta(0,close_mean,None,6)
alpha.columns = ['alpha21']
return alpha
@timer
def alpha22(self):
close = self.close
close_mean = Mean(close,6)
data = pd.concat([close,close_mean],axis = 1,join = 'inner')
data.columns = ['close','close_mean']
temp = pd.DataFrame((data['close'] - data['close_mean'])/data['close_mean'])
temp_delay = Delay(temp,3)
data_temp = pd.concat([temp,temp_delay],axis = 1,join = 'inner')
data_temp.columns = ['temp','temp_delay']
temp2 = pd.DataFrame(data_temp['temp'] - data_temp['temp_delay'])
alpha = SMA(temp2,12,1)
alpha.columns = ['alpha22']
return alpha
@timer
def alpha23(self):
close = self.close
close_std = STD(close,20)
close_delay = Delay(close,1)
data = pd.concat([close,close_std,close_delay],axis = 1, join = 'inner')
data.columns = ['Close','close_std','close_delay']
data['temp'] = data['close_std']
data['temp'][data['Close'] <= data['close_delay']] = 0
temp = pd.DataFrame(data['temp'])
sma1 = SMA(temp,20,1)
sma2 = SMA(pd.DataFrame(data['close_std']),20,1)
sma = pd.concat([sma1,sma2],axis = 1, join = 'inner')
sma.columns = ['sma1','sma2']
alpha = pd.DataFrame(sma['sma1']/sma['sma2'])
alpha.columns = ['alpha23']
return alpha
@timer
def alpha24(self):
close = self.close
close_delay = Delay(close,5)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis=1 ,join = 'inner' )
temp = data['Close'] - data['close_delay']
temp = pd.DataFrame(temp)
alpha = SMA(temp,5,1)
alpha.columns = ['alpha24']
return alpha
@timer
def alpha25(self):
close = self.close
close_delta = Delta(close,7)
ret = self.ret
r1 = Rank(close_delta)
r3 = Rank(Sum(ret,250))
volume = self.volume
volume_mean = Mean(pd.DataFrame(volume['Vol']),20)
volume_mean.columns = ['volume_mean']
data = pd.concat([volume,volume_mean],axis = 1,join = 'inner')
temp0 = pd.DataFrame(data['Vol']/data['volume_mean'])
temp = DecayLinear(temp0,9)
r2 = Rank(temp)
rank = pd.concat([r1,r2,r3],axis = 1, join = 'inner')
rank.columns = ['r1','r2','r3']
alpha = pd.DataFrame(-1 * rank['r1'] * (1 - rank['r2']) * rank['r3'])
alpha.columns = ['alpha25']
return alpha
@timer
def alpha26(self):
close = self.close
vwap = self.vwap
close_mean7 = Mean(close,7)
close_mean7.columns = ['close_mean7']
close_delay5 = Delay(close,5)
close_delay5.columns = ['close_delay5']
data = pd.concat([vwap,close_delay5],axis = 1,join = 'inner')
corr = Corr(data,230)
corr.columns = ['corr']
data_temp = pd.concat([corr,close_mean7,close],axis = 1,join = 'inner')
alpha = data_temp['close_mean7'] - data_temp['Close'] + data_temp['corr']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha26']
return alpha
@timer
def alpha27(self):
"""
uncompleted
"""
close = self.close
close_delay3 = Delay(close,3)
close_delay6 = Delay(close,6)
data = pd.concat([close,close_delay3,close_delay6],axis = 1,join = 'inner')
data.columns = ['close','close_delay3','close_delay6']
temp1 = pd.DataFrame((data['close'] - data['close_delay3'])/data['close_delay3'] * 100)
temp2 = pd.DataFrame((data['close'] - data['close_delay6'])/data['close_delay6'] * 100)
data_temp = pd.concat([temp1,temp2],axis = 1,join = 'inner')
data_temp.columns = ['temp1','temp2']
temp = pd.DataFrame(data_temp['temp1'] + data_temp['temp2'])
alpha = DecayLinear(temp,12)
alpha.columns = ['alpha27']
return alpha
@timer
def alpha28(self):
close = self.close
low = self.low
high = self.high
low_min = TsMin(low,9)
high_max = TsMax(high,9)
data = pd.concat([close,low_min,high_max],axis = 1,join = 'inner')
data.columns = ['Close','low_min','high_max']
temp1 = pd.DataFrame((data['Close'] - data['low_min']) /(data['high_max'] - data['low_min']))
sma1 = SMA(temp1,3,1)
sma2 = SMA(sma1,3,1)
sma = pd.concat([sma1,sma2],axis = 1, join = 'inner')
sma.columns = ['sma1','sma2']
alpha = pd.DataFrame(sma['sma1'] * 2 - sma['sma2'] * 3)
alpha.columns = ['alpha28']
return alpha
@timer
def alpha29(self):
close = self.close
volume = self.volume
close_delay = Delay(close,6)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay,volume],axis = 1, join = 'inner')
alpha = (data['Close'] - data['close_delay'])/data['close_delay'] * data['Vol']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha29']
return alpha
@timer
def alpha30(self):
"""
uncompleted
"""
close = self.close
close_delay = Delay(close,1)
@timer
def alpha31(self):
close = self.close
close_delay = Delay(close,12)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
alpha = (data['Close'] - data['close_delay'])/data['close_delay']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha31']
return alpha
@timer
def alpha32(self):
volume = self.volume
high = self.high
r1 = Rank(volume)
r2 = Rank(high)
rank = pd.concat([r1,r2],axis = 1,join = 'inner')
corr = Corr(rank,3)
r = Rank(corr)
alpha = -1 * Sum(r,3)
alpha.columns = ['alpha32']
return alpha
@timer
def alpha33(self):
low = self.low
volume = self.volume
ret = self.ret
low_min = TsMin(low,5)
low_min_delay = Delay(low_min,5)
data1 = pd.concat([low_min,low_min_delay],axis = 1,join = 'inner')
data1.columns = ['low_min','low_min_delay']
ret_sum240 = Sum(ret,240)
ret_sum20 = Sum(ret,20)
ret_temp = pd.concat([ret_sum240,ret_sum20],axis = 1, join = 'inner')
ret_temp.columns = ['ret240','ret20']
temp1 = pd.DataFrame(data1['low_min_delay'] - data1['low_min'])
temp2 = pd.DataFrame((ret_temp['ret240'] - ret_temp['ret20'])/220)
r_temp2 = Rank(temp2)
r_volume = TsRank(volume,5)
temp = pd.concat([temp1,r_temp2,r_volume],axis = 1,join = 'inner')
temp.columns = ['temp1','r_temp2','r_volume']
alpha = temp['temp1'] * temp['r_temp2'] * temp['r_volume']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha33']
return alpha
@timer
def alpha34(self):
close = self.close
close_mean = Mean(close,12)
close_mean.columns = ['close_mean']
data = pd.concat([close,close_mean],axis = 1, join = 'inner')
alpha = pd.DataFrame(data['close_mean']/data['Close'])
alpha.columns = ['alpha34']
return alpha
@timer
def alpha35(self):
volume = self.volume
Open = self.open
open_delay = Delay(Open,1)
open_delay.columns = ['open_delay']
open_linear = DecayLinear(Open,17)
open_linear.columns = ['open_linear']
open_delay_temp = DecayLinear(open_delay,15)
r1 = Rank(open_delay_temp)
data = pd.concat([Open,open_linear],axis = 1,join = 'inner')
Open_temp = data['Open'] * 0.65 + 0.35 * data['open_linear']
rank = pd.concat([volume,Open_temp],axis = 1, join = 'inner')
rank.columns = ['r1','r2']
corr = Corr(rank,7)
r2 = Rank(-1 * corr)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
r.columns = ['r1','r2']
alpha = Cross_min(pd.DataFrame(r['r1']),pd.DataFrame(r['r2']))
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha35']
return alpha
@timer
def alpha36(self):
volume = self.volume
vwap = self.vwap
r1 = Rank(volume)
r2 = Rank(vwap)
rank = pd.concat([r1,r2],axis = 1,join = 'inner')
corr = Corr(rank,6)
temp = Sum(corr,2)
alpha = Rank(temp)
alpha.columns = ['alpha36']
return alpha
@timer
def alpha37(self):
Open = self.open
ret = self.ret
open_sum = Sum(Open,5)
ret_sum = Sum(ret,5)
data = pd.concat([open_sum,ret_sum],axis = 1,join = 'inner')
data.columns = ['open_sum','ret_sum']
temp = data['open_sum'] * data['ret_sum']
temp_delay = Delay(temp,10)
data_temp = pd.concat([temp,temp_delay],axis = 1,join = 'inner')
data_temp.columns = ['temp','temp_delay']
alpha = -1 * Rank(pd.DataFrame(data_temp['temp'] - data_temp['temp_delay']))
alpha.columns = ['alpha37']
return alpha
@timer
def alpha38(self):
high = self.high
high_mean = Mean(high,20)
high_delta = Delta(high,2)
data = pd.concat([high,high_mean,high_delta],axis = 1,join = 'inner')
data.columns = ['high','high_mean','high_delta']
data['alpha'] = -1 * data['high_delta']
data['alpha'][data['high_mean'] >= data['high']] = 0
alpha = pd.DataFrame(data['alpha'])
alpha.columns = ['alpha38']
return alpha
@timer
def alpha39(self):
close = self.close
Open = self.open
vwap = self.vwap
volume = self.volume
close_delta2 = Delta(close,2)
close_delta2_decay = DecayLinear(close_delta2,8)
r1 = Rank(close_delta2_decay)
price_temp = pd.concat([vwap,Open],axis = 1,join = 'inner')
price = pd.DataFrame(price_temp['Vwap'] * 0.3 + price_temp['Open'] * 0.7)
volume_mean = Mean(volume,180)
volume_mean_sum = Sum(volume_mean,37)
rank = pd.concat([price,volume_mean_sum],axis = 1,join = 'inner')
corr = Corr(rank,14)
corr_decay = DecayLinear(corr,12)
r2 = Rank(corr_decay)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r2'] - r['r1'])
alpha.columns = ['alpha39']
return alpha
@timer
def alpha40(self):
close = self.close
volume = self.volume
close_delay = Delay(close,1)
data = pd.concat([close,volume,close_delay],axis = 1, join = 'inner')
data.columns = ['close','volume','close_delay']
data['temp1'] = data['volume']
data['temp2'] = data['volume']
data['temp1'][data['close'] <= data['close_delay']] = 0
data['temp2'][data['close'] > data['close_delay']] = 0
s1 = Sum(pd.DataFrame(data['temp1']),26)
s2 = Sum(pd.DataFrame(data['temp2']),26)
s = pd.concat([s1,s2], axis = 1, join = 'inner')
s.columns = ['s1','s2']
alpha = pd.DataFrame(s['s1']/s['s2'] * 100)
alpha.columns = ['alpha40']
return alpha
@timer
def alpha41(self):
vwap = self.vwap
vwap_delta = Delta(vwap,3)
vwap_delta_max = TsMax(vwap_delta,5)
alpha = -1 * Rank(vwap_delta_max)
alpha.columns = ['alpha41']
return alpha
@timer
def alpha42(self):
high = self.high
volume = self.volume
high_std = STD(high,10)
r1 = Rank(high_std)
data = pd.concat([high,volume],axis = 1,join = 'inner')
corr = Corr(data,10)
r = pd.concat([r1,corr],axis = 1,join = 'inner')
r.columns = ['r1','corr']
alpha = pd.DataFrame(-1 * r['r1'] * r['corr'])
alpha.columns = ['alpha42']
return alpha
@timer
def alpha43(self):
close = self.close
volume = self.volume
close_delay = Delay(close,1)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay,volume],axis = 1,join = 'inner')
data['sign'] = 1
data['sign'][data['Close'] < data['close_delay']] = -1
temp = pd.DataFrame(data['Vol'] * data['sign'])
alpha = Sum(temp,6)
alpha.columns = ['alpha43']
return alpha
@timer
def alpha44(self):
volume = self.volume
vwap = self.vwap
low = self.low
volume_mean = Mean(volume,10)
rank = pd.concat([low,volume_mean],axis = 1,join = 'inner')
corr = Corr(rank,7)
corr_decay = DecayLinear(corr,6)
r1 = TsRank(corr_decay,4)
vwap_delta = Delta(vwap,3)
vwap_delta_decay = DecayLinear(vwap_delta,10)
r2 = TsRank(vwap_delta_decay,15)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] + r['r2'])
alpha.columns = ['alpha44']
return alpha
@timer
def alpha45(self):
volume = self.volume
vwap = self.vwap
close = self.close
Open = self.open
price = pd.concat([close,Open],axis = 1,join = 'inner')
price['price'] = price['Close'] * 0.6 + price['Open'] * 0.4
price_delta = Delta(pd.DataFrame(price['price']),1)
r1 = Rank(price_delta)
volume_mean = Mean(volume,150)
data = pd.concat([vwap,volume_mean],axis = 1,join = 'inner')
corr = Corr(data,15)
r2 = Rank(corr)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] + r['r2'])
alpha.columns = ['alpha45']
return alpha
@timer
def alpha46(self):
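# average of the 3-, 6-, 12- and 24-day moving averages of close, relative to the current close.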
close = self.close
close_mean3 = Mean(close,3)
close_mean6 = Mean(close,6)
close_mean12 = Mean(close,12)
close_mean24 = Mean(close,24)
data = pd.concat([close,close_mean3,close_mean6,close_mean12,close_mean24],axis = 1,join = 'inner')
data.columns = ['c','c3','c6','c12','c24']
alpha = (data['c3'] + data['c6'] + data['c12'] + data['c24'])/(4 * data['c'])
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha46']
return alpha
@timer
def alpha47(self):
close = self.close
low = self.low
high = self.high
high_max = TsMax(high,6)
low_min = TsMin(low,6)
data = pd.concat([high_max,low_min,close],axis = 1,join = 'inner')
data.columns = ['high_max','low_min','close']
temp = pd.DataFrame((data['high_max'] - data['close'])/(data['high_max'] - \
data['low_min']) * 100)
alpha = SMA(temp,9,1)
alpha.columns = ['alpha47']
return alpha
@timer
def alpha48(self):
close = self.close
volume = self.volume
temp1 = Delta(close,1)
temp1_delay1 = Delay(temp1,1)
temp1_delay2 = Delay(temp1,2)
data = pd.concat([temp1,temp1_delay1,temp1_delay2],axis = 1,join = 'inner')
data.columns = ['temp1','temp1_delay1','temp1_delay2']
temp2 = pd.DataFrame(np.sign(data['temp1']) + np.sign(data['temp1_delay1']) \
+ np.sign(data['temp1_delay2']))
volume_sum5 = Sum(volume,5)
volume_sum20 = Sum(volume,20)
data_temp = pd.concat([temp2,volume_sum5,volume_sum20],axis = 1,join = 'inner')
data_temp.columns = ['temp2','volume_sum5','volume_sum20']
temp3 = pd.DataFrame(data_temp['temp2'] * data_temp['volume_sum5']/\
data_temp['volume_sum20'])
alpha = -1 * Rank(temp3)
alpha.columns = ['alpha48']
return alpha
@timer
def alpha49(self):
low = self.low
high = self.high
data = pd.concat([low,high],axis = 1,join = 'inner')
price_sum = pd.DataFrame(data['Low'] + data['High'])
price_sum_delay = Delay(price_sum,1)
price = pd.concat([price_sum,price_sum_delay],axis = 1,join = 'inner')
price.columns = ['sum','sum_delay']
price['temp'] = 0
price['temp'][price['sum'] < price['sum_delay']] = 1
alpha = Sum(pd.DataFrame(price['temp']),12)
alpha.columns = ['alpha49']
return alpha
@timer
def alpha50(self):
low = self.low
high = self.high
data = pd.concat([low,high],axis = 1,join = 'inner')
price_sum = pd.DataFrame(data['Low'] + data['High'])
price_sum_delay = Delay(price_sum,1)
price = pd.concat([price_sum,price_sum_delay],axis = 1,join = 'inner')
price.columns = ['sum','sum_delay']
price['temp'] = 1
price['temp'][price['sum'] <= price['sum_delay']] = -1
alpha = Sum(pd.DataFrame(price['temp']),12)
alpha.columns = ['alpha50']
return alpha
@timer
def alpha51(self):
low = self.low
high = self.high
data = pd.concat([low,high],axis = 1,join = 'inner')
price_sum = pd.DataFrame(data['Low'] + data['High'])
price_sum_delay = Delay(price_sum,1)
price = pd.concat([price_sum,price_sum_delay],axis = 1,join = 'inner')
price.columns = ['sum','sum_delay']
price['temp'] = 1
price['temp'][price['sum'] <= price['sum_delay']] = 0
alpha = Sum(pd.DataFrame(price['temp']),12)
alpha.columns = ['alpha51']
return alpha
@timer
def alpha52(self):
low = self.low
high = self.high
close = self.close
data = pd.concat([low,high,close],axis = 1,join = 'inner')
data['sum_delay'] = Delay(pd.DataFrame((data['High'] + data['Low'] + data['Close'])/3),1)
temp1 = pd.DataFrame(data['High'] - data['sum_delay'])
temp1.columns = ['high_diff']
temp2 = pd.DataFrame(data['sum_delay'] - data['Low'])
temp2.columns = ['low_diff']
temp1['max'] = temp1['high_diff']
temp1['max'][temp1['high_diff'] < 0 ] = 0
temp2['max'] = temp2['low_diff']
temp2['max'][temp2['low_diff'] < 0 ] = 0
temp1_sum = Sum(pd.DataFrame(temp1['max']),26)
temp2_sum = Sum(pd.DataFrame(temp2['max']),26)
alpha_temp = pd.concat([temp1_sum,temp2_sum],axis = 1,join = 'inner')
alpha_temp.columns = ['s1','s2']
alpha = pd.DataFrame(alpha_temp['s1']/alpha_temp['s2'] * 100)
alpha.columns = ['alpha52']
return alpha
@timer
def alpha53(self):
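# percentage of the last 12 days on which close exceeded the previous close,
# assuming Count(0, x, y, n) counts the days with x > y over the last n observations.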
close = self.close
close_delay = Delay(close,1)
count = Count(0,close,close_delay,12)
alpha = count/12.0 * 100
alpha.columns = ['alpha53']
return alpha
@timer
def alpha54(self):
Open = self.open
close = self.close
data = pd.concat([Open,close], axis = 1, join = 'inner')
data.columns = ['open','close']
temp = pd.DataFrame(data['close'] - data['open'])
temp_abs = pd.DataFrame(np.abs(temp))
df = pd.concat([temp,temp_abs], axis = 1, join= 'inner')
df.columns = ['temp','abs']
std = STD(pd.DataFrame(df['temp'] + df['abs']),10)
corr = Corr(data,10)
data1 = pd.concat([corr,std],axis = 1, join = 'inner')
data1.columns = ['corr','std']
alpha = Rank(pd.DataFrame(data1['corr'] + data1['std'])) * -1
alpha.columns = ['alpha54']
return alpha
@timer
def alpha55(self):
Open = self.open
close = self.close
low = self.low
high = self.high
close_delay = Delay(close,1)
open_delay = Delay(Open,1)
low_delay = Delay(low,1)
data = pd.concat([Open,close,low,high,close_delay,open_delay,low_delay], axis =1 ,join = 'inner')
data.columns= ['open','close','low','high','close_delay','open_delay','low_delay']
temp1 = pd.DataFrame((data['close'] - data['close_delay'] + (data['close'] - data['open'])/2\
+ data['close_delay'] - data['open_delay'])/ np.abs(data['high'] - data['close_delay']))
temp2 = pd.DataFrame(np.abs(data['high'] - data['close_delay']) + np.abs(data['low'] - data['close_delay'])/2 \
+ np.abs(data['close_delay'] - data['open_delay'])/4)
temp3 = pd.DataFrame(np.abs(data['low'] - data['close_delay']) + np.abs(data['high'] - data['close_delay'])/2 \
+ np.abs(data['close_delay'] - data['open_delay'])/4)
abs1 = pd.DataFrame(np.abs(data['high'] - data['close_delay']))
abs2 = pd.DataFrame(np.abs(data['low'] - data['close_delay']))
abs3 = pd.DataFrame(np.abs(data['high'] - data['low_delay']))
data1 = pd.concat([abs1,abs2,abs3], axis = 1, join = 'inner')
data1.columns = ['abs1','abs2','abs3']
data_temp = pd.concat([abs1,abs2],axis = 1, join = 'inner')
data_temp_max = pd.DataFrame(np.max(data_temp,axis = 1))
data_temp_max.columns = ['max']
data_temp1 = pd.concat([data,data_temp_max], axis = 1, join = 'inner')
temp4 = pd.DataFrame((np.abs(data_temp1['high'] - data_temp1['low_delay']) + \
np.abs(data_temp1['close_delay'] - data_temp1['open_delay'])) *\
data_temp1['max'])
data1['judge1'] = 0
data1['judge2'] = 0
data1['judge3'] = 0
data1['judge4'] = 0
# case 1: abs(high - prev close) is the largest of the three ranges
data1['judge1'][data1['abs1'] > data1['abs2']] = 1
data1['judge2'][data1['abs1'] > data1['abs3']] = 1
# case 2: abs(low - prev close) is the largest of the three ranges
data1['judge3'][data1['abs2'] > data1['abs3']] = 1
data1['judge4'][data1['abs2'] > data1['abs1']] = 1
judge_1 = pd.DataFrame(data1['judge1'] * data1['judge2'])
judge_2 = pd.DataFrame(data1['judge3'] * data1['judge4'])
data2 = pd.concat([temp1,temp2,temp3,temp4,judge_1,judge_2], axis = 1, join = 'inner')
data2.columns = ['t1','t2','t3','t4','j1','j2']
data2['j3'] = 1
data2['j4'] = data2['j3'] - data2['j1'] - data2['j2']
data2['t5'] = data2['t2'] * data2['j1'] + data2['t3'] * data2['j2'] + \
data2['t4'] * data2['j4']
tep = pd.DataFrame(16 * data2['t5']/data2['t1'])
alpha = Sum(tep,20)
alpha.columns = ['alpha55']
return alpha
@timer
def alpha56(self):
low = self.low
high = self.high
volume = self.volume
Open = self.open
open_min = TsMin(Open,12)
data1 = pd.concat([Open,open_min], axis = 1, join = 'inner')
data1.columns = ['open','open_min']
r1 = Rank(pd.DataFrame(data1['open'] - data1['open_min']))
volume_mean = Mean(volume,40)
volume_mean_sum= Sum(volume_mean,19)
data2 = pd.concat([high,low],axis = 1, join = 'inner')
temp = pd.DataFrame((data2['High'] + data2['Low'])/2)
rank = pd.concat([temp,volume_mean_sum],axis = 1 , join = 'inner')
rank.columns = ['temp','volume_mean_sum']
corr = Corr(rank,13)
r2 = Rank(corr)
r = pd.concat([r1,r2],axis = 1, join = 'inner')
r.columns = ['r1','r2']
r['alpha'] = 0
r['alpha'][r['r1'] >= r['r2']] = 1
alpha = pd.DataFrame(r['alpha'])
alpha.columns = ['alpha56']
return alpha
@timer
def alpha57(self):
low = self.low
high = self.high
close = self.close
low_min = TsMin(low,9)
high_max = TsMax(high,9)
data = pd.concat([close,low_min,high_max],axis = 1,join = 'inner')
data.columns = ['close','low_min','high_max']
temp = pd.DataFrame((data['close'] - data['low_min'])/(data['high_max'] \
- data['low_min']) * 100)
alpha = SMA(temp,3,1)
alpha.columns = ['alpha57']
return alpha
@timer
def alpha58(self):
close = self.close
close_delay = Delay(close,1)
count = Count(0,close,close_delay,20)
alpha = count/20.0 * 100
alpha.columns = ['alpha58']
return alpha
@timer
def alpha59(self):
low = self.low
high = self.high
close = self.close
close_delay = Delay(close,1)
max_temp = pd.concat([high,close_delay],axis = 1,join = 'inner')
min_temp = pd.concat([low,close_delay],axis = 1,join = 'inner')
max_temp1 = pd.DataFrame(np.max(max_temp,axis = 1))
min_temp1 = pd.DataFrame(np.min(min_temp,axis = 1))
data = pd.concat([close,close_delay,max_temp1,min_temp1],axis = 1,join = 'inner')
data.columns = ['close','close_delay','max','min']
data['max'][data['close'] > data['close_delay']] = 0
data['min'][data['close'] <= data['close_delay']] = 0
alpha = pd.DataFrame(data['max'] + data['min'])
alpha.columns = ['alpha59']
return alpha
@timer
def alpha60(self):
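# 20-day sum of volume scaled by the close-location factor (2*close - low - high) / (low + high).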
low = self.low
high = self.high
close = self.close
volume = self.volume
data = pd.concat([low,high,close,volume],axis = 1,join = 'inner')
temp = pd.DataFrame((2 * data['Close'] - data['Low'] - data['High'])/(data['Low'] + \
data['High']) * data['Vol'])
alpha = Sum(temp,20)
alpha.columns = ['alpha60']
return alpha
@timer
def alpha61(self):
low = self.low
volume = self.volume
vwap = self.vwap
vwap_delta = Delta(vwap,1)
vwap_delta_decay = DecayLinear(vwap_delta,12)
r1 = Rank(vwap_delta_decay)
volume_mean = Mean(volume,80)
data = pd.concat([low,volume_mean],axis = 1,join = 'inner')
corr = Corr(data,8)
corr_decay = DecayLinear(corr,17)
r2 = Rank(corr_decay)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
alpha = pd.DataFrame(np.max(r,axis = 1) * -1)
alpha.columns = ['alpha61']
return alpha
@timer
def alpha62(self):
high = self.high
volume = self.volume
volume_r = Rank(volume)
data = pd.concat([high,volume_r],axis = 1,join = 'inner')
alpha = -1 * Corr(data,5)
alpha.columns = ['alpha62']
return alpha
@timer
def alpha63(self):
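# RSI-style ratio: smoothed positive 1-day close changes over smoothed absolute changes, times 100.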
close = self.close
close_delay = Delay(close,1)
data = pd.concat([close,close_delay],axis = 1,join = 'inner')
data.columns = ['close','close_delay']
data['max'] = data['close'] - data['close_delay']
data['max'][data['max'] < 0] = 0
data['abs'] = np.abs(data['close'] - data['close_delay'])
sma1 = SMA(pd.DataFrame(data['max']),6,1)
sma2 = SMA(pd.DataFrame(data['abs']),6,1)
sma = pd.concat([sma1,sma2],axis = 1,join = 'inner')
sma.columns = ['sma1','sma2']
alpha = pd.DataFrame(sma['sma1']/sma['sma2'] * 100)
alpha.columns = ['alpha63']
return alpha
@timer
def alpha64(self):
vwap = self.vwap
volume = self.volume
close = self.close
vwap_r = Rank(vwap)
volume_r = Rank(volume)
data1 = pd.concat([vwap_r,volume_r],axis = 1,join = 'inner')
corr1 = Corr(data1,4)
corr1_decay = DecayLinear(corr1,4)
r1 = Rank(corr1_decay)
close_mean = Mean(close,60)
close_r = Rank(close)
close_mean_r = Rank(close_mean)
data2 = pd.concat([close_r,close_mean_r],axis = 1,join = 'inner')
corr2 = Corr(data2,4)
corr2_max = TsMax(corr2,13)
corr2_max_decay = DecayLinear(corr2_max,14)
r2 = Rank(corr2_max_decay)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
alpha = pd.DataFrame(np.max(r,axis = 1) *-1)
alpha.columns = ['alpha64']
return alpha
@timer
def alpha65(self):
close = self.close
close_mean = Mean(close,6)
data = pd.concat([close,close_mean],axis = 1, join = 'inner')
data.columns = ['close','close_mean']
alpha = pd.DataFrame(data['close_mean']/data['close'])
alpha.columns = ['alpha65']
return alpha
@timer
def alpha66(self):
close = self.close
close_mean = Mean(close,6)
data = pd.concat([close,close_mean],axis = 1, join = 'inner')
data.columns = ['close','close_mean']
alpha = (data['close'] - data['close_mean'])/data['close_mean'] * 100
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha66']
return alpha
@timer
def alpha67(self):
close = self.close
close_delay = Delay(close,1)
data = pd.concat([close,close_delay],axis = 1,join = 'inner')
data.columns = ['close','close_delay']
data['max'] = data['close'] - data['close_delay']
data['max'][data['max'] < 0] = 0
data['abs'] = np.abs(data['close'] - data['close_delay'])
sma1 = SMA(pd.DataFrame(data['max']),24,1)
sma2 = SMA(pd.DataFrame(data['abs']),24,1)
sma = pd.concat([sma1,sma2],axis = 1,join = 'inner')
sma.columns = ['sma1','sma2']
alpha = pd.DataFrame(sma['sma1']/sma['sma2'] * 100)
alpha.columns = ['alpha67']
return alpha
@timer
def alpha68(self):
high = self.high
volume = self.volume
low = self.low
data = pd.concat([high,low,volume],axis = 1,join = 'inner')
data['sum']= (data['High'] + data['Low'])/2
data['sum_delta'] = Delta(pd.DataFrame(data['sum']),1)
temp = data['sum_delta'] * (data['High'] - data['Low'])/data['Vol']
alpha = SMA(pd.DataFrame(temp),15,2)
alpha.columns = ['alpha68']
return alpha
@timer
def alpha69(self):
high = self.high
low = self.low
Open = self.open
dtm = DTM(Open,high)
dbm = DBM(Open,low)
dtm_sum = Sum(dtm,20)
dbm_sum = Sum(dbm,20)
data = pd.concat([dtm_sum,dbm_sum],axis = 1, join = 'inner')
data.columns = ['dtm','dbm']
data['temp1'] = (data['dtm'] - data['dbm'])/data['dtm']
data['temp2'] = (data['dtm'] - data['dbm'])/data['dbm']
data['temp1'][data['dtm'] <= data['dbm']] = 0
data['temp2'][data['dtm'] >= data['dbm']] = 0
alpha = pd.DataFrame(data['temp1'] + data['temp2'])
alpha.columns = ['alpha69']
return alpha
@timer
def alpha70(self):
amount = self.amt
alpha= STD(amount,6)
alpha.columns = ['alpha70']
return alpha
@timer
def alpha71(self):
close = self.close
close_mean = Mean(close,24)
data = pd.concat([close,close_mean],axis = 1, join = 'inner')
data.columns = ['close','close_mean']
alpha = (data['close'] - data['close_mean'])/data['close_mean'] * 100
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha71']
return alpha
@timer
def alpha72(self):
low = self.low
high = self.high
close = self.close
low_min = TsMin(low,6)
high_max = TsMax(high,6)
data = pd.concat([close,low_min,high_max],axis = 1,join = 'inner')
data.columns = ['close','low_min','high_max']
temp = (data['high_max'] - data['close'])/(data['high_max'] - data['low_min']) * 100
alpha = SMA(pd.DataFrame(temp),15,1)
alpha.columns = ['alpha72']
return alpha
@timer
def alpha73(self):
vwap = self.vwap
volume = self.volume
close = self.close
data1 = pd.concat([close,volume],axis = 1,join = 'inner')
corr1 = Corr(data1,10)
corr1_decay = DecayLinear(DecayLinear(corr1,16),4)
r1 = TsRank(corr1_decay,5)
volume_mean = Mean(volume,30)
data2 = pd.concat([vwap,volume_mean],axis = 1,join = 'inner')
corr2 = Corr(data2,4)
corr2_decay = DecayLinear(corr2,3)
r2 = Rank(corr2_decay)
r = pd.concat([r1,r2],axis = 1,join ='inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r2'] - r['r1'])
alpha.columns= ['alpha73']
return alpha
@timer
def alpha74(self):
vwap = self.vwap
volume = self.volume
low = self.low
volume_mean = Mean(volume,40)
volume_mean_sum = Sum(volume_mean,20)
data1 = pd.concat([low,vwap],axis = 1,join = 'inner')
data_sum = Sum(pd.DataFrame(data1['Low'] * 0.35 + data1['Vwap'] * 0.65),20)
data = pd.concat([volume_mean_sum,data_sum],axis = 1,join = 'inner')
corr = Corr(data,7)
r1 = Rank(corr)
vwap_r = Rank(vwap)
volume_r = Rank(volume)
data_temp = pd.concat([vwap_r,volume_r],axis = 1,join = 'inner')
corr2 = Corr(data_temp,6)
r2 = Rank(corr2)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] + r['r2'])
alpha.columns = ['alpha74']
return alpha
@timer
def alpha75(self):
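# of the last 50 days on which the benchmark index failed to close above its open, the fraction
# on which the stock itself still closed up; the flags are unstacked to a date-by-stock matrix so
# the single index series can be tiled across all stocks before the rolling 50-day sums are taken.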
close = self.close
Open = self.open
close_index = self.close_index
open_index = self.open_index
data1 = pd.concat([close,Open], axis = 1, join = 'inner')
data1.columns = ['close','open']
data1['temp'] = 1
data1['temp'][data1['close'] <= data1['open']] = 0
data2 = pd.concat([close_index,open_index], axis = 1, join = 'inner')
data2.columns = ['close','open']
data2['tep'] = 1
data2['tep'][data2['close'] > data2['open']] = 0
temp = data1['temp'].unstack()
tep = data2['tep'].unstack()
tep1 = np.matlib.repmat(tep,1,np.size(temp,1))
data3 = temp * tep1
temp_result = data3.rolling(50,min_periods = 50).sum()
tep_result = tep.rolling(50,min_periods = 50).sum()
tep2_result = np.matlib.repmat(tep_result,1,np.size(temp,1))
result = temp_result/tep2_result
alpha = pd.DataFrame(result.stack())
alpha.columns = ['alpha75']
return alpha
@timer
def alpha76(self):
volume = self.volume
close = self.close
close_delay = Delay(close,1)
data = pd.concat([volume,close,close_delay],axis = 1,join = 'inner')
data.columns = ['volume','close','close_delay']
temp = pd.DataFrame(np.abs((data['close']/data['close_delay'] -1 )/data['volume']))
temp_std = STD(temp,20)
temp_mean = Mean(temp,20)
data_temp = pd.concat([temp_std,temp_mean],axis = 1,join = 'inner')
data_temp.columns = ['std','mean']
alpha = pd.DataFrame(data_temp['std']/data_temp['mean'])
alpha.columns = ['alpha76']
return alpha
@timer
def alpha77(self):
vwap = self.vwap
volume = self.volume
low = self.low
high = self.high
data = pd.concat([high,low,vwap],axis = 1,join = 'inner')
temp = pd.DataFrame((data['High'] + data['Low'])/2 - data['Vwap'])
temp_decay = DecayLinear(temp,20)
r1 = Rank(temp_decay)
temp1 = pd.DataFrame((data['High'] + data['Low'])/2)
volume_mean = Mean(volume,40)
data2 = pd.concat([temp1,volume_mean],axis = 1,join = 'inner')
corr = Corr(data2,3)
corr_decay = DecayLinear(corr,6)
r2 = Rank(corr_decay)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
alpha = pd.DataFrame(np.min(r,axis = 1))
alpha.columns = ['alpha77']
return alpha
@timer
def alpha78(self):
low = self.low
high = self.high
close = self.close
data = pd.concat([low,high,close],axis = 1,join = 'inner')
temp = pd.DataFrame((data['Low'] + data['High'] + data['Close'])/3)
temp.columns = ['temp']
temp_mean = Mean(temp,12)
temp_mean.columns = ['temp_mean']
temp2 = pd.concat([temp,temp_mean],axis = 1,join = 'inner')
tmp = pd.DataFrame(temp2['temp'] - temp2['temp_mean'])
data1 = pd.concat([close,temp_mean],axis = 1,join = 'inner')
temp_abs = pd.DataFrame(np.abs(data1['Close'] - data1['temp_mean']))
temp_abs_mean = Mean(temp_abs,12)
df = pd.concat([tmp,temp_abs_mean],axis = 1,join = 'inner')
df.columns = ['df1','df2']
alpha = pd.DataFrame(df['df1']/(df['df2'] * 0.015))
alpha.columns = ['alpha78']
return alpha
@timer
def alpha79(self):
close = self.close
close_delay = Delay(close,1)
data = pd.concat([close,close_delay],axis = 1,join = 'inner')
data.columns = ['close','close_delay']
data['max'] = data['close'] - data['close_delay']
data['max'][data['max'] < 0] = 0
data['abs'] = np.abs(data['close'] - data['close_delay'])
sma1 = SMA(pd.DataFrame(data['max']),12,1)
sma2 = SMA(pd.DataFrame(data['abs']),12,1)
sma = pd.concat([sma1,sma2],axis = 1,join = 'inner')
sma.columns = ['sma1','sma2']
alpha = pd.DataFrame(sma['sma1']/sma['sma2'] * 100)
alpha.columns = ['alpha79']
return alpha
@timer
def alpha80(self):
volume = self.volume
volume_delay = Delay(volume,5)
volume_delay.columns = ['volume_delay']
data = pd.concat([volume,volume_delay],axis = 1,join = 'inner')
alpha = (data['Vol'] - data['volume_delay'])/data['volume_delay']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha80']
return alpha
@timer
def alpha81(self):
volume = self.volume
alpha = SMA(volume,21,2)
alpha.columns = ['alpha81']
return alpha
@timer
def alpha82(self):
low = self.low
high = self.high
close = self.close
low_min = TsMin(low,6)
high_max = TsMax(high,6)
data = pd.concat([close,low_min,high_max],axis = 1,join = 'inner')
data.columns = ['close','low_min','high_max']
temp = (data['high_max'] - data['close'])/(data['high_max'] - data['low_min']) * 100
alpha = SMA(pd.DataFrame(temp),20,1)
alpha.columns = ['alpha82']
return alpha
@timer
def alpha83(self):
high = self.high
volume = self.volume
high_r = Rank(high)
volume_r = Rank(volume)
data = pd.concat([high_r,volume_r],axis = 1,join = 'inner')
corr = Corr(data,5)
alpha = -1 * Rank(corr)
alpha.columns = ['alpha83']
return alpha
@timer
def alpha84(self):
close = self.close
volume = self.volume
close_delay = Delay(close,1)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay,volume],axis = 1,join = 'inner')
data['sign'] = 1
data['sign'][data['Close'] < data['close_delay']] = -1
data['sign'][data['Close'] == data['close_delay']] = 0
temp = pd.DataFrame(data['Vol'] * data['sign'])
alpha = Sum(temp,20)
alpha.columns = ['alpha84']
return alpha
@timer
def alpha85(self):
close = self.close
volume = self.volume
volume_mean = Mean(volume,20)
close_delta = Delta(close,7)
data1 = pd.concat([volume,volume_mean],axis = 1,join = 'inner')
data1.columns = ['volume','volume_mean']
temp1 = pd.DataFrame(data1['volume']/data1['volume_mean'])
r1 = TsRank(temp1,20)
r2 = TsRank(-1 * close_delta,8)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] * r['r2'])
alpha.columns = ['alpha85']
return alpha
@timer
def alpha86(self):
close = self.close
close_delay20 = Delay(close,20)
close_delay10 = Delay(close,10)
data = pd.concat([close,close_delay20,close_delay10],axis = 1,join = 'inner')
data.columns = ['close','close_delay20','close_delay10']
temp = pd.DataFrame((data['close_delay20'] - data['close_delay10'])/10 - \
(data['close_delay10'] - data['close'])/10)
close_delta = Delta(close,1) * -1
data_temp = pd.concat([close_delta,temp],axis = 1,join = 'inner')
data_temp.columns = ['close_delta','temp']
data_temp['close_delta'][data_temp['temp'] > 0.25]= -1
data_temp['close_delta'][data_temp['temp'] < 0]= 1
alpha = pd.DataFrame(data_temp['close_delta'])
alpha.columns = ['alpha86']
return alpha
@timer
def alpha87(self):
vwap = self.vwap
high = self.high
low = self.low
Open = self.open
vwap_delta = Delta(vwap,4)
vwap_delta_decay = DecayLinear(vwap_delta,7)
r1 = Rank(vwap_delta_decay)
data = pd.concat([low,high,vwap,Open], axis = 1, join = 'inner')
temp = pd.DataFrame((data['Low'] * 0.1 + data['High'] * 0.9 - data['Vwap'])/\
(data['Open'] - 0.5 * (data['Low'] + data['High'])))
temp_decay = DecayLinear(temp,11)
r2 = TsRank(temp_decay,7)
r = pd.concat([r1,r2], axis = 1,join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(-1 * (r['r1'] + r['r2']))
alpha.columns = ['alpha87']
return alpha
@timer
def alpha88(self):
close = self.close
close_delay = Delay(close,20)
data = pd.concat([close,close_delay],axis = 1,join = 'inner')
data.columns = ['close','close_delta']
alpha = (data['close'] - data['close_delta'])/data['close_delta'] * 100
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha88']
return alpha
@timer
def alpha89(self):
close = self.close
sma1 = SMA(close,13,2)
sma2 = SMA(close,27,2)
sma = pd.concat([sma1,sma2],axis = 1, join = 'inner')
sma.columns = ['sma1','sma2']
temp = pd.DataFrame(sma['sma1'] - sma['sma2'])
sma3 = SMA(temp,10,2)
data = pd.concat([temp,sma3],axis = 1, join = 'inner')
data.columns = ['temp','sma']
alpha = pd.DataFrame(2 *(data['temp'] - data['sma']))
alpha.columns = ['alpha89']
return alpha
@timer
def alpha90(self):
volume = self.volume
vwap = self.vwap
r1 = Rank(volume)
r2 = Rank(vwap)
rank = pd.concat([r1,r2], axis = 1, join = 'inner')
corr = Corr(rank,5)
alpha = -1 * Rank(corr)
alpha.columns = ['alpha90']
return alpha
@timer
def alpha91(self):
close = self.close
volume = self.volume
low = self.low
close_max = TsMax(close,5)
data1 = pd.concat([close,close_max], axis = 1,join = 'inner')
data1.columns = ['close','close_max']
r1 = Rank(pd.DataFrame(data1['close'] - data1['close_max']))
volume_mean = Mean(volume,40)
data2 = pd.concat([volume_mean,low], axis = 1, join = 'inner')
corr = Corr(data2,5)
r2 = Rank(corr)
r = pd.concat([r1,r2],axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] * r['r2'] * -1)
alpha.columns = ['alpha91']
return alpha
@timer
def alpha92(self):
volume = self.volume
vwap = self.vwap
close = self.close
data = pd.concat([close,vwap],axis = 1, join = 'inner')
data['price'] = data['Close'] * 0.35 + data['Vwap'] * 0.65
price_delta = Delta(pd.DataFrame(data['price']),2)
price_delta_decay = DecayLinear(price_delta,3)
r1 = Rank(price_delta_decay)
volume_mean = Mean(volume,180)
rank = pd.concat([volume_mean,close],axis = 1,join = 'inner')
corr = Corr(rank,13)
temp = pd.DataFrame(np.abs(corr))
temp_decay = DecayLinear(temp,5)
r2 = TsRank(temp_decay,15)
r = pd.concat([r1,r2],axis = 1, join = 'inner')
alpha = pd.DataFrame(-1 * np.max(r, axis = 1))
alpha.columns = ['alpha92']
return alpha
@timer
def alpha93(self):
low = self.low
Open = self.open
open_delay = Delay(Open,1)
data = pd.concat([low,Open,open_delay],axis = 1,join = 'inner')
data.columns = ['low','open','open_delay']
temp1 = pd.DataFrame(data['open'] - data['low'])
temp2 = pd.DataFrame(data['open'] - data['open_delay'])
data_temp = pd.concat([temp1,temp2],axis = 1 ,join = 'inner')
temp_max = pd.DataFrame(np.max(data_temp,axis = 1))
temp_max.columns = ['max']
data2 = pd.concat([data,temp_max],axis = 1,join = 'inner')
data2['temp'] = data2['max']
data2['temp'][data2['open'] >= data2['open_delay']] = 0
alpha = Sum(pd.DataFrame(data2['temp']),20)
alpha.columns = ['alpha93']
return alpha
@timer
def alpha94(self):
close = self.close
volume = self.volume
close_delay = Delay(close,1)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay,volume],axis = 1,join = 'inner')
data['sign'] = 1
data['sign'][data['Close'] < data['close_delay']] = -1
data['sign'][data['Close'] == data['close_delay']] = 0
temp = pd.DataFrame(data['Vol'] * data['sign'])
alpha = Sum(temp,30)
alpha.columns = ['alpha94']
return alpha
@timer
def alpha95(self):
amt = self.amt
alpha = STD(amt,20)
alpha.columns = ['alpha95']
return alpha
@timer
def alpha96(self):
low = self.low
high = self.high
close = self.close
low_min = TsMin(low,9)
high_max = TsMax(high,9)
data = pd.concat([close,low_min,high_max],axis = 1,join = 'inner')
data.columns = ['close','low_min','high_max']
temp = ( data['close'] - data['low_min'])/(data['high_max'] - data['low_min']) * 100
alpha_temp = SMA(pd.DataFrame(temp),3,1)
alpha = SMA(alpha_temp,3,1)
alpha.columns = ['alpha96']
return alpha
@timer
def alpha97(self):
volume = self.volume
alpha = STD(volume,10)
alpha.columns = ['alpha97']
return alpha
@timer
def alpha98(self):
close = self.close
close_mean = Mean(close,100)
close_mean_delta = Delta(close_mean,100)
close_delay = Delay(close,100)
data = pd.concat([close_mean_delta,close_delay],axis = 1,join = 'inner')
data.columns = ['delta','delay']
temp = pd.DataFrame(data['delta']/ data['delay'])
close_delta = Delta(close,3)
close_min = TsMin(close,100)
data_temp = pd.concat([close,close_delta,close_min,temp],axis = 1,join = 'inner')
data_temp.columns = ['close','close_delta','close_min','temp']
data_temp['diff'] = (data_temp['close'] - data_temp['close_min']) * -1
data_temp['diff'][data_temp['temp'] < 0.05] = 0
data_temp['close_delta'] = data_temp['close_delta'] * -1
data_temp['close_delta'][data_temp['temp'] >= 0.05]= 0
alpha = pd.DataFrame(data_temp['close_delta'] + data_temp['diff'])
alpha.columns = ['alpha98']
return alpha
@timer
def alpha99(self):
close = self.close
volume = self.volume
r1 = Rank(close)
r2 = Rank(volume)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
cov = Cov(r,5)
alpha = -1 * Rank(cov)
alpha.columns = ['alpha99']
return alpha
@timer
def alpha100(self):
volume = self.volume
alpha = STD(volume,20)
alpha.columns = ['alpha100']
return alpha
@timer
def alpha101(self):
close = self.close
volume = self.volume
high = self.high
vwap = self.vwap
volume_mean = Mean(volume,30)
volume_mean_sum = Sum(volume_mean,37)
data1 = pd.concat([close,volume_mean_sum], axis = 1, join = 'inner')
corr1 = Corr(data1,15)
r1 = Rank(corr1)
data2 = pd.concat([high,vwap],axis = 1, join = 'inner')
temp = pd.DataFrame(data2['High'] * 0.1 + data2['Vwap'] * 0.9)
temp_r = Rank(temp)
volume_r = Rank(volume)
data3 = pd.concat([temp_r,volume_r], axis = 1, join = 'inner')
corr2 = Corr(data3,11)
r2 = Rank(corr2)
r = pd.concat([r1,r2], axis = 1, join = 'inner')
r.columns = ['r1','r2']
r['alpha'] = 0
r['alpha'][r['r1'] < r['r2']] = -1
alpha = pd.DataFrame(r['alpha'])
alpha.columns = ['alpha101']
return alpha
@timer
def alpha102(self):
volume = self.volume
temp = Delta(volume,1)
temp.columns = ['temp']
temp['max'] = temp['temp']
temp['max'][temp['temp'] < 0 ] = 0
temp['abs'] = np.abs(temp['temp'])
sma1 = SMA(pd.DataFrame(temp['max']),6,1)
sma2 = SMA(pd.DataFrame(temp['abs']),6,1)
sma = pd.concat([sma1,sma2], axis = 1 ,join ='inner')
sma.columns = ['sma1','sma2']
alpha = pd.DataFrame(sma['sma1']/ sma['sma2'] * 100)
alpha.columns = ['alpha102']
return alpha
@timer
def alpha103(self):
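# how recently the 20-day low was set, scaled to 0-100, assuming Lowday(x, n) returns
# the number of days since the minimum of the last n observations.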
low = self.low
lowday = Lowday(low,20)
alpha = (20 - lowday)/20.0 * 100
alpha.columns = ['alpha103']
return alpha
@timer
def alpha104(self):
close = self.close
volume = self.volume
high = self.high
data = pd.concat([high,volume], axis = 1, join = 'inner')
corr = Corr(data,5)
corr_delta = Delta(corr,5)
close_std = STD(close,20)
r1 = Rank(close_std)
temp = pd.concat([corr_delta,r1], axis = 1, join = 'inner')
temp.columns = ['delta','r']
alpha = pd.DataFrame(-1 * temp['delta'] * temp['r'])
alpha.columns = ['alpha104']
return alpha
@timer
def alpha105(self):
volume = self.volume
Open = self.open
volume_r = Rank(volume)
open_r = Rank(Open)
rank = pd.concat([volume_r,open_r],axis = 1, join = 'inner')
alpha = -1 * Corr(rank,10)
alpha.columns = ['alpha105']
return alpha
@timer
def alpha106(self):
close = self.close
close_delay = Delay(close,20)
data = pd.concat([close,close_delay], axis = 1, join = 'inner')
data.columns = ['close','close_delay']
alpha = pd.DataFrame(data['close'] - data['close_delay'])
alpha.columns = ['alpha106']
return alpha
@timer
def alpha107(self):
Open = self.open
high = self.high
close = self.close
low = self.low
high_delay = Delay(high,1)
close_delay = Delay(close,1)
low_delay = Delay(low,1)
data = pd.concat([high_delay,close_delay,low_delay,Open], axis = 1, join = 'inner')
data.columns = ['high_delay','close_delay','low_delay','open']
r1 = Rank(pd.DataFrame(data['open'] - data['high_delay']))
r2 = Rank(pd.DataFrame(data['open'] - data['close_delay']))
r3 = Rank(pd.DataFrame(data['open'] - data['low_delay']))
alpha = -1 * r1 * r2 * r3
alpha.columns = ['alpha107']
return alpha
@timer
def alpha108(self):
high = self.high
volume = self.volume
vwap = self.vwap
high_min = TsMin(high,2)
data1 = pd.concat([high,high_min], axis = 1, join = 'inner')
data1.columns = ['high','high_min']
r1 = Rank(pd.DataFrame(data1['high'] - data1['high_min']))
volume_mean = Mean(volume,120)
rank = pd.concat([vwap,volume_mean],axis = 1, join = 'inner')
corr = Corr(rank,6)
r2 = Rank(corr)
r = pd.concat([r1,r2], axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] * r['r2'] * -1)
alpha.columns = ['alpha108']
return alpha
@timer
def alpha109(self):
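# smoothed daily high-low range divided by a second smoothing of itself.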
high = self.high
low = self.low
data = pd.concat([high,low],axis = 1, join = 'inner')
temp = SMA(pd.DataFrame(data['High'] - data['Low']),10,2)
sma = SMA(temp,10,2)
sma_temp = pd.concat([temp,sma],axis = 1, join = 'inner')
sma_temp.columns = ['temp','sma']
alpha = pd.DataFrame(sma_temp['temp']/sma_temp['sma'])
alpha.columns = ['alpha109']
return alpha
@timer
def alpha110(self):
high = self.high
low = self.low
close = self.close
close_delay = Delay(close,1)
close_delay.columns = ['close_delay']
data = pd.concat([high,low,close_delay], axis = 1, join = 'inner')
data['max1'] = data['High'] - data['close_delay']
data['max2'] = data['close_delay'] - data['Low']
data['max1'][data['max1'] < 0] = 0
data['max2'][data['max2'] < 0] = 0
s1 = Sum(pd.DataFrame(data['max1']),20)
s2 = Sum(pd.DataFrame(data['max2']),20)
s = pd.concat([s1,s2], axis = 1 , join = 'inner')
s.columns = ['s1','s2']
alpha = pd.DataFrame(s['s1']/s['s2'])
alpha.columns = ['alpha110']
return alpha
@timer
def alpha111(self):
high = self.high
low = self.low
close = self.close
volume = self.volume
data = pd.concat([high,low,close,volume], axis = 1, join = 'inner')
temp = pd.DataFrame(data['Vol'] * (2 * data['Close'] - data['Low'] - data['High'])\
/(data['High'] - data['Low']))
sma1 = SMA(temp,11,2)
sma2 = SMA(temp,4,2)
sma = pd.concat([sma1, sma2], axis = 1, join = 'inner')
sma.columns = ['sma1','sma2']
alpha = pd.DataFrame(sma['sma1'] - sma['sma2'])
alpha.columns = ['alpha111']
return alpha
@timer
def alpha112(self):
close = self.close
close_delay = Delay(close,1)
data = pd.concat([close, close_delay], axis = 1, join = 'inner')
data.columns = ['close','close_delay']
data['temp'] = 1
data['temp'][data['close'] > data['close_delay']] = 0
alpha = Sum(pd.DataFrame(data['temp']),12)
alpha.columns = ['alpha112']
return alpha
@timer
def alpha113(self):
close = self.close
volume = self.volume
close_delay = Delay(close,5)
close_delay_mean = Mean(close_delay,20)
data1 = pd.concat([close,volume],axis = 1, join = 'inner')
corr = Corr(data1,2)
r1 = Rank(close_delay_mean)
data2 = pd.concat([r1,corr], axis = 1, join = 'inner')
data2.columns = ['r1','corr']
r1 = pd.DataFrame(data2['r1'] * data2['corr'])
close_sum5 = Sum(close,5)
close_sum20 = Sum(close,20)
data3 = pd.concat([close_sum5,close_sum20],axis = 1, join = 'inner')
corr2 = Corr(data3,2)
r2 = Rank(corr2)
r = pd.concat([r1,r2], axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] * r['r2'] * -1)
alpha.columns = ['alpha113']
return alpha
@timer
def alpha114(self):
close = self.close
high = self.high
low = self.low
volume = self.volume
vwap = self.vwap
close_mean = Mean(close,5)
data = pd.concat([high,low,close_mean], axis = 1, join = 'inner')
data.columns = ['high','low','close_mean']
temp = pd.DataFrame((data['high'] - data['low'])/data['close_mean'])
temp_delay = Delay(temp,2)
r1 = TsRank(temp_delay,5)
temp1 = pd.concat([temp,vwap,close], axis = 1, join = 'inner')
temp1.columns = ['temp','vwap','close']
tep = pd.DataFrame(temp1['temp']/(temp1['vwap'] - temp1['close']))
r2 = TsRank(volume,5)
data2 = pd.concat([r2,tep], axis = 1, join = 'inner')
data2.columns = ['r2','tep']
tep1 = pd.DataFrame(data2['r2']/data2['tep'])
r3 = TsRank(tep1,5)
r = pd.concat([r1,r3],axis = 1, join = 'inner')
r.columns = ['r1','r3']
alpha = pd.DataFrame(r['r1'] + r['r3'])
alpha.columns = ['alpha114']
return alpha
@timer
def alpha115(self):
high = self.high
low = self.low
volume = self.volume
volume_mean = Mean(volume,30)
price = pd.concat([high,low], axis = 1, join = 'inner')
price.columns = ['high','low']
price_temp = price['high'] * 0.9 + price['low'] * 0.1
data = pd.concat([price_temp,volume_mean],axis = 1, join = 'inner')
corr = Corr(data,10)
r1 = Rank(corr)
data2 = pd.concat([high,low], axis = 1, join = 'inner')
temp = pd.DataFrame((data2['High'] + data2['Low'])/2)
temp_r = TsRank(temp,4)
volume_r = TsRank(volume,10)
data3 = pd.concat([temp_r,volume_r], axis = 1, join = 'inner')
corr2 = Corr(data3,7)
r2 = Rank(corr2)
r = pd.concat([r1,r2], axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] * r['r2'])
alpha.columns = ['alpha115']
return alpha
@timer
def alpha116(self):
close = self.close
alpha = RegResi(0,close,None,20)
alpha.columns = ['alpha116']
return alpha
@timer
def alpha117(self):
high = self.high
low = self.low
close = self.close
volume = self.volume
ret = self.ret
r1 = TsRank(volume,32)
data1 = pd.concat([close,high,low],axis = 1, join = 'inner')
r2 = TsRank(pd.DataFrame(data1['Close'] + data1['High'] - data1['Low']),16)
r3 = TsRank(ret,32)
r = pd.concat([r1,r2,r3], axis = 1, join = 'inner')
r.columns = ['r1','r2','r3']
alpha = pd.DataFrame(r['r1'] * (1 - r['r2']) * (1 - r['r3']))
alpha.columns = ['alpha117']
return alpha
@timer
def alpha118(self):
high = self.high
low = self.low
Open = self.open
data = pd.concat([high,low,Open], axis = 1, join = 'inner')
s1 = Sum(pd.DataFrame(data['High'] - data['Open']),20)
s2 = Sum(pd.DataFrame(data['Open'] - data['Low']),20)
s = pd.concat([s1,s2], axis = 1, join = 'inner')
s.columns = ['s1','s2']
alpha = pd.DataFrame(s['s1']/s['s2'] * 100)
alpha.columns = ['alpha118']
return alpha
@timer
def alpha119(self):
Open = self.open
volume = self.volume
vwap = self.vwap
volume_mean = Mean(volume,5)
volume_mean_sum = Sum(volume_mean,26)
data1 = pd.concat([vwap,volume_mean_sum],axis = 1, join = 'inner')
corr1 = Corr(data1,5)
corr1_decay = DecayLinear(corr1,7)
r1 = Rank(corr1_decay)
open_r = Rank(Open)
volume_mean2 = Mean(volume,15)
volume_mean2_r = Rank(volume_mean2)
data2 = pd.concat([open_r, volume_mean2_r], axis = 1, join = 'inner')
corr2 = Corr(data2,21)
corr2_min = TsMin(corr2,9)
corr2_min_r = TsRank(corr2_min,7)
corr_min_r_decay = DecayLinear(corr2_min_r,8)
r2 = Rank(corr_min_r_decay)
r = pd.concat([r1,r2], axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] - r['r2'])
alpha.columns = ['alpha119']
return alpha
@timer
def alpha120(self):
vwap = self.vwap
close = self.close
data = pd.concat([vwap,close], axis = 1, join = 'inner')
r1 = Rank(pd.DataFrame(data['Vwap'] - data['Close']))
r2 = Rank(pd.DataFrame(data['Vwap'] + data['Close']))
r = pd.concat([r1,r2],axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1']/r['r2'])
alpha.columns = ['alpha120']
return alpha
@timer
def alpha121(self):
vwap = self.vwap
volume = self.volume
vwap_r = TsRank(vwap,20)
volume_mean = Mean(volume,60)
volume_mean_r = TsRank(volume_mean,2)
data = pd.concat([vwap_r,volume_mean_r], axis = 1, join = 'inner')
corr= Corr(data,18)
temp = TsRank(corr,3)
vwap_min = TsMin(vwap,12)
data2 = pd.concat([vwap,vwap_min],axis = 1, join = 'inner')
data2.columns = ['vwap','vwap_min']
rank = Rank(pd.DataFrame(data2['vwap'] - data2['vwap_min']))
data3 = pd.concat([rank,temp],axis = 1, join = 'inner')
data3.columns = ['rank','temp']
alpha = pd.DataFrame(np.power(data3['rank'],data3['temp']) * -1)
alpha.columns = ['alpha121']
return alpha
@timer
def alpha122(self):
close = self.close
close_ln = pd.DataFrame(np.log(close))
temp1 = SMA(close_ln,13,2)
sma1 = SMA(temp1,13,2)
sma2 = SMA(sma1,13,2)
sma3 = SMA(sma2,13,2)
sma3_delay = Delay(sma3,1)
data = pd.concat([sma3,sma3_delay],axis = 1, join = 'inner')
data.columns = ['sma','sma_delay']
alpha = pd.DataFrame(data['sma']/data['sma_delay'])
alpha.columns = ['alpha122']
return alpha
@timer
def alpha123(self):
volume = self.volume
high = self.high
low = self.low
data1 = pd.concat([high,low], axis = 1, join = 'inner')
s1 = Sum(pd.DataFrame((data1['High'] + data1['Low'])/2),20)
volume_mean = Mean(volume,60)
s2 = Sum(volume_mean,20)
data2 = pd.concat([s1,s2], axis = 1, join = 'inner')
corr1 = Corr(data2,9)
data3 = pd.concat([low,volume], axis = 1, join = 'inner')
corr2 = Corr(data3,6)
corr1_r = Rank(corr1)
corr2_r = Rank(corr2)
data = pd.concat([corr1_r,corr2_r], axis = 1, join = 'inner')
data.columns = ['r1','r2']
data['alpha'] = -1
data['alpha'][data['r1'] >= data['r2']] = 0
alpha = pd.DataFrame(data['alpha'])
alpha.columns = ['alpha123']
return alpha
@timer
def alpha124(self):
close = self.close
vwap = self.vwap
close_max = TsMax(close,30)
close_max_r = Rank(close_max)
close_max_r_decay = DecayLinear(close_max_r,2)
close_max_r_decay.columns = ['decay']
data = pd.concat([close,vwap,close_max_r_decay], axis = 1, join ='inner')
alpha = pd.DataFrame((data['Close'] - data['Vwap'])/data['decay'])
alpha.columns = ['alpha124']
return alpha
@timer
def alpha125(self):
close = self.close
vwap = self.vwap
volume = self.volume
volume_mean = Mean(volume,80)
data1 = pd.concat([vwap,volume_mean], axis = 1, join = 'inner')
corr1 = Corr(data1,17)
data2 = pd.concat([close,vwap], axis = 1, join = 'inner')
temp2 = pd.DataFrame(0.5*(data2['Close'] + data2['Vwap']))
temp2_delta = Delta(temp2,3)
corr1_decay = DecayLinear(corr1,20)
r1 = Rank(corr1_decay)
temp2_delta_decay = DecayLinear(temp2_delta,16)
r2 = Rank(temp2_delta_decay)
r = pd.concat([r1,r2], axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1']/r['r2'])
alpha.columns = ['alpha125']
return alpha
@timer
def alpha126(self):
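# typical price: the simple average of close, high and low.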
close = self.close
high = self.high
low = self.low
data = pd.concat([close,high,low], axis = 1, join = 'inner')
alpha = pd.DataFrame((data['Close'] + data['High'] + data['Low'])/3)
alpha.columns = ['alpha126']
return alpha
@timer
def alpha127(self):
close = self.close
close_max = TsMax(close,12)
data = pd.concat([close,close_max], axis = 1, join = 'inner')
data.columns = ['close','close_max']
alpha = pd.DataFrame((data['close'] - data['close_max'])/data['close_max'])
alpha.columns = ['alpha127']
return alpha
@timer
def alpha128(self):
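# money-flow-index-style oscillator, 100 - 100/(1 + up/down), where up and down are 14-day sums
# of typical-price * volume split by the direction of the typical price versus the previous day.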
close = self.close
high = self.high
low = self.low
volume = self.volume
data = pd.concat([close,high,low,volume], axis = 1, join = 'inner')
data['temp1'] = (data['Close'] + data['Low'] + data['High'])/3
data['temp2'] = data['temp1'] * data['Vol']
data['temp3'] = data['temp1'] * data['Vol']
temp_delay = Delay(pd.DataFrame(data['temp1']),1)
temp_delay.columns = ['temp_decay']
data = pd.concat([data,temp_delay], axis = 1, join = 'inner')
data['temp2'][data['temp1'] < data['temp_decay']] = 0
data['temp3'][data['temp1'] > data['temp_decay']] = 0
s1 = Sum(pd.DataFrame(data['temp2']),14)
s2 = Sum(pd.DataFrame(data['temp3']),14)
s = pd.concat([s1,s2], axis = 1, join = 'inner')
s.columns = ['s1','s2']
alpha = pd.DataFrame(100 - 100/(1+ s['s1']/s['s2']))
alpha.columns = ['alpha128']
return alpha
@timer
def alpha129(self):
close = self.close
close_delay = Delay(close,1)
data = pd.concat([close,close_delay], axis = 1, join = 'inner')
data.columns = ['close','close_delay']
data['abs'] = np.abs(data['close'] - data['close_delay'])
data['temp'] = data['abs']
data['temp'][data['close'] < data['close_delay']] = 0
alpha = Sum(pd.DataFrame(data['temp']),12)
alpha.columns = ['alpha129']
return alpha
@timer
def alpha130(self):
close = self.close
high = self.high
low = self.low
volume = self.volume
volume_mean = Mean(volume,40)
data1 = pd.concat([high,low],axis = 1, join = 'inner')
temp1 = pd.DataFrame((data1['High'] + data1['Low'])/2)
rank1 = pd.concat([temp1,volume_mean], axis = 1, join = 'inner')
corr = Corr(rank1,9)
close_r = Rank(close)
volume_r = Rank(volume)
data2 = pd.concat([close_r,volume_r],axis = 1, join = 'inner')
corr2 = Corr(data2,7)
corr_decay = DecayLinear(corr,10)
r1 = Rank(corr_decay)
corr2_decay = DecayLinear(corr2,3)
r2 = Rank(corr2_decay)
r = pd.concat([r1,r2],axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1']/r['r2'])
alpha.columns = ['alpha130']
return alpha
@timer
def alpha131(self):
close = self.close
vwap = self.vwap
volume = self.volume
volume_mean = Mean(volume,50)
data1 = pd.concat([close,volume_mean], axis = 1, join = 'inner')
corr = Corr(data1,18)
vwap_delta = Delta(vwap,1)
temp2 = TsRank(corr,18)
data2 = pd.concat([vwap_delta,temp2],axis = 1, join = 'inner')
data2.columns = ['vwap_delta','temp2']
temp3 = np.power(data2['vwap_delta'],data2['temp2'])
alpha = Rank(pd.DataFrame(temp3))
alpha.columns = ['alpha131']
return alpha
@timer
def alpha132(self):
amt = self.amt
alpha = Mean(amt,20)
alpha.columns = ['alpha132']
return alpha
@timer
def alpha133(self):
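# recency of the 20-day high minus recency of the 20-day low, each scaled to 0-100 as in alpha103,
# assuming Highday/Lowday return the number of days since the extreme within the window.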
low = self.low
high = self.high
highday = Highday(high,20)
lowday = Lowday(low,20)
data = pd.concat([highday,lowday],axis = 1, join = 'inner')
data.columns = ['highday','lowday']
alpha = (20 - data['highday'])/20.0 * 100 - (20 - data['lowday'])/20.0 * 100
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha133']
return alpha
@timer
def alpha134(self):
close = self.close
volume = self.volume
close_delay = Delay(close,12)
close_delay.columns = ['close_delay']
data = pd.concat([close,volume,close_delay], axis = 1, join = 'inner')
alpha = pd.DataFrame((data['Close'] - data['close_delay'])/data['close_delay'])
alpha.columns = ['alpha134']
return alpha
@timer
def alpha135(self):
close = self.close
close_delay = Delay(close,20)
data = pd.concat([close,close_delay],axis = 1 , join = 'inner')
data.columns = ['close','close_delay']
temp = pd.DataFrame(data['close']/data['close_delay'])
temp_delay = Delay(temp,1)
alpha = SMA(temp_delay,20,1)
alpha.columns = ['alpha135']
return alpha
@timer
def alpha136(self):
volume = self.volume
Open = self.open
ret = self.ret
ret_delta = Delta(ret,3)
ret_delta_r = Rank(ret_delta)
data = pd.concat([Open,volume],axis = 1, join = 'inner')
corr = Corr(data,10)
data_temp = pd.concat([ret_delta_r,corr],axis = 1, join = 'inner')
data_temp.columns = ['ret_delta','corr']
alpha = pd.DataFrame(-1 * data_temp['ret_delta'] * data_temp['corr'])
alpha.columns = ['alpha136']
return alpha
@timer
def alpha137(self):
Open = self.open
close = self.close
low = self.low
high = self.high
close_delay = Delay(close,1)
open_delay = Delay(Open,1)
low_delay = Delay(low,1)
data = pd.concat([Open,close,low,high,close_delay,open_delay,low_delay], axis =1 ,join = 'inner')
data.columns= ['open','close','low','high','close_delay','open_delay','low_delay']
temp1 = pd.DataFrame((data['close'] - data['close_delay'] + (data['close'] - data['open'])/2\
+ data['close_delay'] - data['open_delay'])/ np.abs(data['high'] - data['close_delay']))
temp2 = pd.DataFrame(np.abs(data['high'] - data['close_delay']) + np.abs(data['low'] - data['close_delay'])/2 \
+ np.abs(data['close_delay'] - data['open_delay'])/4)
temp3 = pd.DataFrame(np.abs(data['low'] - data['close_delay']) + np.abs(data['high'] - data['close_delay'])/2 \
+ np.abs(data['close_delay'] - data['open_delay'])/4)
abs1 = pd.DataFrame(np.abs(data['high'] - data['close_delay']))
abs2 = pd.DataFrame(np.abs(data['low'] - data['close_delay']))
abs3 = pd.DataFrame(np.abs(data['high'] - data['low_delay']))
data1 = pd.concat([abs1,abs2,abs3], axis = 1, join = 'inner')
data1.columns = ['abs1','abs2','abs3']
data_temp = pd.concat([abs1,abs2],axis = 1, join = 'inner')
data_temp_max = pd.DataFrame(np.max(data_temp,axis = 1))
data_temp_max.columns = ['max']
data_temp1 = pd.concat([data,data_temp_max], axis = 1, join = 'inner')
temp4 = pd.DataFrame((np.abs(data_temp1['high'] - data_temp1['low_delay']) + \
np.abs(data_temp1['close_delay'] - data_temp1['open_delay'])) *\
data_temp1['max'])
data1['judge1'] = 0
data1['judge2'] = 0
data1['judge3'] = 0
data1['judge4'] = 0
# case 1: abs(high - prev close) is the largest of the three ranges
data1['judge1'][data1['abs1'] > data1['abs2']] = 1
data1['judge2'][data1['abs1'] > data1['abs3']] = 1
# case 2: abs(low - prev close) is the largest of the three ranges
data1['judge3'][data1['abs2'] > data1['abs3']] = 1
data1['judge4'][data1['abs2'] > data1['abs1']] = 1
judge_1 = pd.DataFrame(data1['judge1'] * data1['judge2'])
judge_2 = pd.DataFrame(data1['judge3'] * data1['judge4'])
data2 = pd.concat([temp1,temp2,temp3,temp4,judge_1,judge_2], axis = 1, join = 'inner')
data2.columns = ['t1','t2','t3','t4','j1','j2']
data2['j3'] = 1
data2['j4'] = data2['j3'] - data2['j1'] - data2['j2']
data2['t5'] = data2['t2'] * data2['j1'] + data2['t3'] * data2['j2'] + \
data2['t4'] * data2['j4']
alpha = pd.DataFrame(16 * data2['t5']/data2['t1'])
alpha.columns = ['alpha137']
return alpha
@timer
def alpha138(self):
vwap = self.vwap
volume = self.volume
low = self.low
data1 = pd.concat([low,vwap], axis = 1, join = 'inner')
temp1 = pd.DataFrame(data1['Low'] * 0.7 + data1['Vwap'] * 0.3)
temp1_delta = Delta(temp1,3)
temp1_delta_decay = DecayLinear(temp1_delta,20)
r1 = Rank(temp1_delta_decay)
low_r = TsRank(low,8)
volume_mean = Mean(volume,60)
volume_mean_r = TsRank(volume_mean,17)
data2 = pd.concat([low_r,volume_mean_r],axis = 1, join = 'inner')
corr = Corr(data2,5)
corr_r = TsRank(corr,19)
corr_r_decay = DecayLinear(corr_r,16)
r2 = TsRank(corr_r_decay,7)
r = pd.concat([r1,r2], axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] - r['r2'])
alpha.columns = ['alpha138']
return alpha
@timer
def alpha139(self):
Open = self.open
volume = self.volume
data = pd.concat([Open,volume], axis = 1, join = 'inner')
alpha = -1 * Corr(data,10)
alpha.columns = ['alpha139']
return alpha
@timer
def alpha140(self):
Open = self.open
volume = self.volume
high = self.high
low = self.low
close = self.close
open_r = Rank(Open)
low_r = Rank(low)
high_r = Rank(high)
close_r = Rank(close)
data1 = pd.concat([open_r,low_r,high_r,close_r],axis = 1, join = 'inner')
data1.columns = ['open_r','low_r','high_r','close_r']
temp = pd.DataFrame(data1['open_r'] + data1['low_r'] - \
(data1['high_r'] + data1['close_r']))
close_r_temp = TsRank(close,8)
volume_mean = Mean(volume,70)
volume_mean_r = TsRank(volume_mean,20)
data2 = pd.concat([close_r_temp,volume_mean_r], axis = 1, join = 'inner')
corr = Corr(data2,8)
temp_decay = DecayLinear(temp,8)
r1 = Rank(temp_decay)
corr_decay = DecayLinear(corr,7)
r2 = TsRank(corr_decay,3)
r = pd.concat([r1,r2], axis = 1, join = 'inner')
alpha = pd.DataFrame(np.min(r,axis = 1))
alpha.columns = ['alpha140']
return alpha
@timer
def alpha141(self):
volume = self.volume
high = self.high
volume_mean = Mean(volume,15)
high_r = Rank(high)
volume_mean_r = Rank(volume_mean)
data = pd.concat([high_r,volume_mean_r], axis = 1, join = 'inner')
corr = Corr(data,9)
alpha = -1 * Rank(corr)
alpha.columns = ['alpha141']
return alpha
@timer
def alpha142(self):
close = self.close
volume = self.volume
close_r = TsRank(close,10)
r1 = Rank(close_r)
close_delta = Delta(close,1)
close_delta_delta = Delta(close_delta,1)
r2 = Rank(close_delta_delta)
volume_mean = Mean(volume,20)
data = pd.concat([volume,volume_mean], axis = 1, join = 'inner')
data.columns = ['v','v_m']
temp = pd.DataFrame(data['v']/data['v_m'])
temp_r = TsRank(temp,5)
r3 = Rank(temp_r)
r = pd.concat([r1,r2,r3],axis = 1, join = 'inner')
r.columns = ['r1','r2','r3']
alpha = pd.DataFrame(- 1* r['r1'] * r['r2'] * r['r3'])
alpha.columns= ['alpha142']
return alpha
@timer
def alpha143(self):
close = self.close
close_delay = Delay(close,1)
data = pd.concat([close,close_delay], axis = 1, join = 'inner')
data.columns = ['close','close_delay']
temp = pd.DataFrame((data['close'] - data['close_delay'])/data['close_delay'])
temp.columns= ['temp']
data_temp = pd.concat([data,temp],axis = 1, join = 'inner')
data_temp['temp'][data_temp['close'] <= data_temp['close_delay']] = 1
temp_unstack = data_temp['temp'].unstack()
temp_unstack.iloc[0,:] = 1
df = np.cumprod(temp_unstack,axis = 0)
alpha = df.stack()
alpha.columns = ['alpha143']
return alpha
@timer
def alpha144(self):
close = self.close
amt = self.amt
close_delay = Delay(close,1)
data = pd.concat([close,close_delay,amt], axis = 1, join = 'inner')
data.columns = ['close','close_delay','amt']
data['temp'] = np.abs(data['close']/data['close_delay'] - 1)/data['amt']
data['sign'] = 1
data['sign'][data['close'] >= data['close_delay']] = 0
tep1 = Sum(pd.DataFrame(data['sign'] * data['temp']),20)
tep2 = Count(0,pd.DataFrame(data['close_delay']),pd.DataFrame(data['close']),20)
data2 = pd.concat([tep1,tep2], axis = 1, join = 'inner')
data2.columns = ['tep1','tep2']
alpha = pd.DataFrame(data2['tep1']/data2['tep2'])
alpha.columns = ['alpha144']
return alpha
@timer
def alpha145(self):
volume = self.volume
volume_mean9 = Mean(volume,9)
volume_mean26 = Mean(volume,26)
volume_mean12 = Mean(volume,12)
data = pd.concat([volume_mean9,volume_mean26,volume_mean12], axis = 1, join = 'inner')
data.columns = ['m9','m26','m12']
alpha = pd.DataFrame((data['m9'] - data['m26'])/data['m12'] * 100)
alpha.columns = ['alpha145']
return alpha
@timer
def alpha146(self):
close = self.close
close_delay = Delay(close,1)
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
data.columns = ['close','close_delay']
temp = pd.DataFrame((data['close'] -data['close_delay'])/data['close_delay'])
sma1 = SMA(temp,61,2)
data2 = pd.concat([temp,sma1], axis = 1, join = 'inner')
data2.columns = ['temp1','sma1']
data2['temp2'] = data2['temp1'] - data2['sma1']
temp2_mean = Mean(pd.DataFrame(data2['temp2']),20)
sma2 = SMA(pd.DataFrame(data2['temp1'] - data2['temp2']),61,2)
data_temp = pd.concat([temp2_mean,pd.DataFrame(data2['temp2']),sma2], axis = 1 , join = 'inner')
data_temp.columns = ['temp2_mean','temp2','sma2']
alpha = data_temp['temp2_mean'] * data_temp['temp2'] / data_temp['sma2']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha146']
return alpha
@timer
def alpha147(self):
close = self.close
close_mean = Mean(close,12)
alpha = RegBeta(0,close_mean,None,12)
alpha.columns = ['alpha147']
return alpha
@timer
def alpha148(self):
Open = self.open
volume = self.volume
volume_mean = Mean(volume,60)
volume_mean_s = Sum(volume_mean,9)
data = pd.concat([Open,volume_mean_s],axis = 1, join = 'inner')
corr = Corr(data,6)
r1 = Rank(corr)
open_min = TsMin(Open,14)
data2 = pd.concat([Open,open_min], axis = 1, join = 'inner')
data2.columns = ['open','open_min']
r2 = Rank(pd.DataFrame(data2['open'] - data2['open_min']))
r = pd.concat([r1,r2],axis = 1, join = 'inner')
r.columns = ['r1','r2']
r['alpha'] = -1
r['alpha'][r['r1'] > r['r2']] = 0
alpha = pd.DataFrame(r['alpha'])
alpha.columns = ['alpha148']
return alpha
@timer
def alpha149(self):
close = self.close
close_index = self.close_index
close_delay = Delay(close,1)
close_index_delay = Delay(close_index,1)
data_index = pd.concat([close_index,close_index_delay], axis = 1, join = 'inner')
data_index.columns = ['close','close_delay']
data_index['delta'] = data_index['close']/data_index['close_delay'] - 1
data_index['judge'] = 1
data_index['judge'][data_index['close'] >= data_index['close_delay']] = 0
data_index['delta'][data_index['judge'] == 0] = np.nan
# index_delta_unstack = index_delta_unstack.dropna()
data = pd.concat([close,close_delay], axis = 1, join = 'inner')
data.columns = ['close','close_delay']
data['delta'] = data['close'] / data['close_delay'] - 1
df1 = pd.DataFrame(data['delta'])
df2 = pd.DataFrame(data_index['delta'])
alpha = RegBeta(1,df1,df2,252)
alpha.columns = ['alpha149']
return alpha
@timer
def alpha150(self):
high = self.high
low = self.low
close = self.close
volume = self.volume
data = pd.concat([high,low,close,volume], axis = 1, join = 'inner')
alpha = (data['Close'] + data['High'] + data['Low'])/3 * data['Vol']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha150']
return alpha
@timer
def alpha151(self):
close = self.close
close_delay = Delay(close,20)
data = pd.concat([close,close_delay], axis = 1, join = 'inner')
data.columns = ['close','close_delay']
temp = pd.DataFrame(data['close'] - data['close_delay'])
alpha = SMA(temp,20,1)
alpha.columns = ['alpha151']
return alpha
@timer
def alpha152(self):
close = self.close
close_delay = Delay(close,9)
data = pd.concat([close,close_delay], axis = 1, join = 'inner')
data.columns = ['close','close_delay']
temp = pd.DataFrame(data['close']/data['close_delay'])
temp_delay = Delay(temp,1)
sma1 = SMA(temp_delay,9,1)
sma1_delay = Delay(sma1,1)
sma1_delay_mean1 = Mean(sma1_delay,12)
sma1_delay_mean2 = Mean(sma1_delay,26)
data_temp = pd.concat([sma1_delay_mean1,sma1_delay_mean2],axis = 1, join = 'inner')
data_temp.columns = ['m1','m2']
alpha = SMA(pd.DataFrame(data_temp['m1'] - data_temp['m2']),9,1)
alpha.columns = ['alpha152']
return alpha
@timer
def alpha153(self):
close = self.close
close_mean3 = Mean(close,3)
close_mean6 = Mean(close,6)
close_mean12 = Mean(close,12)
close_mean24 = Mean(close,24)
data = pd.concat([close_mean3, close_mean6, close_mean12, close_mean24], axis = 1 ,join ='inner')
alpha = pd.DataFrame(np.mean(data, axis = 1))
alpha.columns = ['alpha153']
return alpha
@timer
def alpha154(self):
volume = self.volume
vwap = self.vwap
volume_mean = Mean(volume,180)
data = pd.concat([vwap,volume_mean], axis = 1, join = 'inner')
corr = Corr(data,18)
vwap_min = TsMin(vwap,16)
data1 = pd.concat([vwap,vwap_min],axis = 1, join = 'inner')
data1.columns = ['vwap','vwap_min']
temp = pd.DataFrame(data1['vwap'] - data1['vwap_min'])
data_temp = pd.concat([corr,temp], axis = 1, join = 'inner')
data_temp.columns = ['corr','temp']
data_temp['alpha'] = 1
data_temp['alpha'][data_temp['corr'] >= data_temp['temp']] = 0
alpha = pd.DataFrame(data_temp['alpha'])
alpha.columns = ['alpha154']
return alpha
@timer
def alpha155(self):
volume = self.volume
sma1 = SMA(volume,13,2)
sma2 = SMA(volume,26,2)
sma = pd.concat([sma1, sma2], axis = 1, join = 'inner')
sma.columns = ['sma1','sma2']
temp = pd.DataFrame(sma['sma1'] - sma['sma2'])
sma3 = SMA(temp,10,2)
data = pd.concat([temp,sma3], axis = 1 ,join = 'inner')
data.columns = ['temp','sma']
alpha = pd.DataFrame(data['temp'] - data['sma'])
alpha.columns = ['alpha155']
return alpha
@timer
def alpha156(self):
vwap = self.vwap
Open = self.open
low = self.low
vwap_delta = Delta(vwap,5)
vwap_delta_decay = DecayLinear(vwap_delta,3)
r1 = Rank(vwap_delta_decay)
data1 = pd.concat([Open,low],axis = 1, join = 'inner')
temp = -1 * Delta(pd.DataFrame(data1['Open'] * 0.15 + data1['Low'] * 0.85),2)
temp_decay = DecayLinear(temp,3)
r2 = Rank(temp_decay)
r = pd.concat([r1,r2],axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(- 1 *np.max(r, axis = 1))
alpha.columns = ['alpha156']
return alpha
@timer
def alpha157(self):
close = self.close
ret = self.ret
close_delta = Delta(close,5)
close_delta_r = Rank(Rank(close_delta) * -1)
r1 = TsMin(close_delta_r,2)
ret_delay = Delay(-1 * ret,6)
r2 = TsRank(ret_delay,5)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
r.columns = ['r1','r2']
temp = pd.DataFrame(r['r1'] + r['r2'])
alpha = TsMin(temp,5)
alpha.columns = ['alpha157']
return alpha
@timer
def alpha158(self):
high = self.high
low = self.low
close = self.close
temp = SMA(close,15,2)
temp.columns = ['temp']
data = pd.concat([high,low,close,temp],axis = 1 , join = 'inner')
alpha =(data['High'] + data['Low'] - 2 * data['temp'] )/data['Close']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha158']
return alpha
@timer
def alpha159(self):
high = self.high
low = self.low
close = self.close
close_delay = Delay(close,1)
data1 = pd.concat([low,close_delay],axis = 1, join = 'inner')
data2 = pd.concat([high, close_delay], axis = 1, join = 'inner')
temp1 = pd.DataFrame(np.min(data1,axis = 1))
temp2= pd.DataFrame(np.max(data2,axis = 1))
temp = pd.concat([temp1,temp2], axis = 1 ,join = 'inner')
temp.columns = ['temp1','temp2']
temp1_sum6 = Sum(temp1,6)
temp1_sum12 = Sum(temp1,12)
temp1_sum24 = Sum(temp1,24)
tep = pd.DataFrame(temp['temp2'] - temp['temp1'])
s6 = Sum(tep,6)
s12 = Sum(tep,12)
s24 = Sum(tep,24)
data3 = pd.concat([temp1_sum6,temp1_sum12,temp1_sum24,s6,s12,s24], axis = 1 ,join = 'inner')
data3.columns = ['ts6','ts12','ts24','s6','s12','s24']
temp3 = pd.DataFrame(data3['ts6']/data3['s6'] * 12 * 24 + data3['ts12']/data3['s12'] * 6 * 24 \
+ data3['ts24']/data3['s24'] * 6 * 24)
alpha = temp3 / (6*12 + 6*24 + 12*24) * 100
alpha.columns = ['alpha159']
return alpha
@timer
def alpha160(self):
close = self.close
close_std = STD(close,20)
close_delay = Delay(close,1)
data = pd.concat([close,close_std,close_delay],axis = 1, join = 'inner')
data.columns = ['close','close_std','close_delay']
data['close_std'][data['close'] >= data['close_delay']] = 0
alpha = SMA(pd.DataFrame(data['close_std']),20,1)
alpha.columns = ['alpha160']
return alpha
@timer
def alpha161(self):
high = self.high
low = self.low
close = self.close
close_delay = Delay(close,1)
close_delay.columns = ['close_delay']
data1 = pd.concat([high,low],axis = 1 , join = 'inner')
diff = pd.DataFrame(data1['High'] - data1['Low'])
data2 = pd.concat([close_delay,high], axis = 1, join ='inner')
abs1 = pd.DataFrame(np.abs(data2['close_delay'] - data2['High']))
data3 = pd.concat([diff,abs1], axis = 1, join = 'inner')
temp1 = pd.DataFrame(np.max(data3,axis = 1))
data4 = pd.concat([close_delay,low],axis = 1, join = 'inner')
temp2 = pd.DataFrame(np.abs(data4['close_delay'] -data4['Low']))
data = pd.concat([temp1,temp2],axis =1 , join = 'inner')
data.columns = ['temp1','temp2']
temp = pd.DataFrame(np.max(data, axis = 1))
alpha = Mean(temp,12)
alpha.columns = ['alpha161']
return alpha
@timer
def alpha162(self):
close = self.close
close_delay = Delay(close,1)
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
data.columns = ['close','close_delay']
data['max']= data['close'] - data['close_delay']
data['max'][data['max'] < 0] = 0
data['abs'] = np.abs(data['close'] - data['close_delay'])
temp1 = SMA(pd.DataFrame(data['max']),12,1)
temp2 = SMA(pd.DataFrame(data['abs']),12,1)
data1 = pd.concat([temp1,temp2], axis = 1, join = 'inner')
data1.columns = ['temp1','temp2']
tep = pd.DataFrame(data1['temp1']/data1['temp2'])
temp3 = TsMin(tep,12)
temp4 = TsMax(tep,12)
data_temp = pd.concat([tep,temp3,temp4], axis = 1, join = 'inner')
data_temp.columns = ['tep','temp3','temp4']
alpha = (data_temp['tep'] - data_temp['temp3']/data_temp['temp4']) * 100
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha162']
return alpha
@timer
def alpha163(self):
low = self.low
high = self.high
volume = self.volume
ret = self.ret
vwap = self.vwap
volume_mean = Mean(volume,20)
data = pd.concat([high,low,vwap,ret,volume_mean],axis = 1, join = 'inner')
data.columns = ['high','low','vwap','ret','volume_mean']
temp = pd.DataFrame(-1 *data['ret'] * data['volume_mean'] *data['vwap'] * \
(data['high'] - data['low']))
alpha = Rank(temp)
alpha.columns = ['alpha163']
return alpha
@timer
def alpha164(self):
close = self.close
high = self.high
low = self.low
close_delay = Delay(close,1)
data = pd.concat([close,high,low,close_delay],axis = 1, join = 'inner')
data.columns = ['close','high','low','close_delay']
data['temp'] = 1/(data['close'] - data['close_delay'])
data_min = TsMin(pd.DataFrame(data['temp']),12)
data_min.columns = ['min']
data2 = pd.concat([data,data_min],axis = 1, join = 'inner')
        data2['tep'] = (data2['temp'] - data2['min'])/(data2['high'] - data2['low'])
data2['tep'][data['close'] <= data['close_delay']] = 0
alpha = SMA(pd.DataFrame(data2['tep']) * 100,13,2)
alpha.columns = ['alpha164']
return alpha
@timer
def alpha165(self):
close = self.close
close_mean = Mean(close,48)
data = pd.concat([close,close_mean],axis = 1, join = 'inner')
data.columns = ['close','close_mean']
temp = pd.DataFrame(data['close'] - data['close_mean'])
temp_sum = Sum(temp,48)
temp_sum_min = TsMin(temp_sum,48)
temp_sum_max = TsMax(temp_sum,48)
close_std = STD(close,48)
data_temp = pd.concat([temp_sum_min,temp_sum_max,close_std], axis = 1, join = 'inner')
data_temp.columns = ['min','max','std']
alpha = (data_temp['max'] - data_temp['min'])/data_temp['std']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha165']
return alpha
@timer
def alpha166(self):
close = self.close
close_delay = Delay(close,1)
data = pd.concat([close,close_delay], axis = 1, join = 'inner')
data.columns = ['close','close_delay']
temp = pd.DataFrame(data['close']/data['close_delay'])
temp_mean = Mean(temp,20)
data1 = pd.concat([temp,temp_mean], axis = 1, join = 'inner')
data1.columns = ['temp','temp_mean']
temp2 = Sum(pd.DataFrame(data1['temp'] - data1['temp_mean']),20) * 20 * 19
temp3 = Sum(temp,20) * 19 * 18
data2 = pd.concat([temp2,temp3], axis = 1, join = 'inner')
data2.columns = ['temp2','temp3']
alpha = np.power(data2['temp2'],1.5)/np.power(data2['temp3'],1.5)
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha166']
return alpha
@timer
def alpha167(self):
close = self.close
close_delay = Delay(close,1)
data = pd.concat([close,close_delay], axis = 1, join = 'inner')
data.columns = ['close','close_delay']
data['temp'] = data['close'] - data['close_delay']
data['temp'][data['close'] <= data['close_delay']] = 0
alpha = Sum(pd.DataFrame(data['temp']),12)
alpha.columns = ['alpha167']
return alpha
@timer
def alpha168(self):
volume = self.volume
volume_mean = Mean(volume,20)
data = pd.concat([volume,volume_mean], axis = 1, join = 'inner')
data.columns = ['volume','volume_mean']
        alpha = pd.DataFrame(data['volume']/data['volume_mean'] * -1)
alpha.columns = ['alpha168']
return alpha
@timer
def alpha169(self):
close = self.close
close_delay = Delay(close,1)
data = pd.concat([close,close_delay], axis = 1, join = 'inner')
data.columns = ['close','close_delay']
temp1 = pd.DataFrame(data['close'] - data['close_delay'])
sma = SMA(temp1,9,1)
temp2 = Delay(sma,1)
temp2_mean12 = Mean(temp2,12)
temp2_mean26 = Mean(temp2,26)
data2 = pd.concat([temp2_mean12,temp2_mean26], axis = 1, join ='inner')
data2.columns = ['mean1','mean2']
alpha = SMA(pd.DataFrame(data2['mean1'] - data2['mean2']),10,1)
alpha.columns = ['alpha169']
return alpha
@timer
def alpha170(self):
close = self.close
high = self.high
volume = self.volume
vwap = self.vwap
volume_mean = Mean(volume,20)
data1 = pd.concat([high,close,volume,volume_mean], axis = 1, join = 'inner')
data1.columns =['high','close','volume','volume_mean']
temp1 = pd.DataFrame(data1['high']/data1['close'] * data1['volume']/data1['volume_mean'])
r1 = Rank(temp1)
high_mean = Mean(high,5)
vwap_delay = Delay(vwap,5)
data2 = pd.concat([high,close,high_mean], axis = 1, join = 'inner')
data2.columns = ['high','close','high_mean']
temp2 = pd.DataFrame((data2['high'] - data2['close'])/data2['high_mean'])
temp2_r = Rank(temp2)
data3 = pd.concat([vwap,vwap_delay], axis = 1, join = 'inner')
data3.columns = ['vwap','vwap_delay']
temp3 = pd.DataFrame(data3['vwap'] - data3['vwap_delay'])
temp3_r = Rank(temp3)
rank = pd.concat([temp2_r,temp3_r], axis = 1, join = 'inner')
rank.columns = ['r1','r2']
r2 = | pd.DataFrame(rank['r1'] - rank['r2']) | pandas.DataFrame |
import six
import warnings
from onecodex.exceptions import OneCodexException
from onecodex.lib.enums import AbundanceMetric, Rank
from onecodex.viz import (
VizPCAMixin,
VizHeatmapMixin,
VizMetadataMixin,
VizDistanceMixin,
VizBargraphMixin,
)
class AnalysisMixin(
VizPCAMixin, VizHeatmapMixin, VizMetadataMixin, VizDistanceMixin, VizBargraphMixin
):
"""Contains methods for analyzing Classifications results.
Notes
-----
Three DataFrames are required by most methods: collated counts, collated metadata, and taxonomy.
This data is obtained from either a `ClassificationsDataFrame` or a `SampleCollection`. Both
classes use this mixin. `AnalysisMixin` pulls additional methods in from `onecodex.distance`,
`onecodex.taxonomy`, and `onecodex.viz`.
"""
def _get_auto_rank(self, rank):
"""Attempt to figure out what rank we should use for analyses."""
if rank == Rank.Auto:
# if we're an accessor for a ClassificationsDataFrame, use its _rank property
if self.__class__.__name__ == "OneCodexAccessor":
return self._rank
if AbundanceMetric.has_value(self._metric) or self._is_metagenomic:
return Rank.Species
else:
return Rank.Genus
else:
return rank
def _guess_normalized(self):
"""Return True if the collated counts in `self._results` appear to be normalized.
Notes
-----
It's possible that the _results df has already been normalized, which can cause some
methods to fail. This method lets us guess whether that's true and act accordingly.
"""
return (
getattr(self, "_normalized", False)
or AbundanceMetric.has_value(self._metric)
or bool((self._results.sum(axis=1).round(4) == 1.0).all())
) # noqa
def _metadata_fetch(self, metadata_fields, label=None):
"""Fetch and transform given metadata fields from `self.metadata`.
Takes a list of metadata fields, some of which can contain taxon names or taxon IDs, and
returns a DataFrame with transformed data that can be used for plotting.
Parameters
----------
metadata_fields : `list` of `string`
A list of metadata fields, taxon names, or taxon IDs to fetch and transform for display.
label : `string` or `callable`, optional
A metadata field (or function) used to label each analysis. If passing a function, a
dict containing the metadata for each analysis is passed as the first and only
positional argument. The callable function must return a string.
If this argument is not given, and "Label" is in `metadata_fields`, "Label" will be set
to the filename associated with an analysis.
Notes
-----
Taxon names and IDs are transformed into the relative abundances of those taxa within their
own rank. For example, 'Bacteroides' will return the relative abundances of 'Bacteroides'
among all taxa of rank genus. Taxon IDs are stored as strings in `ClassificationsDataFrame`
and are coerced to strings if integers are given.
Metadata fields are returned as is, from the `self.metadata` DataFrame. If multiple metadata
fields are specified in a tuple, their values are joined as strings separated by underscore.
Multiple metadata fields in a tuple must both be categorical. That is, a numerical field and
boolean can not be joined, or the result would be something like '87.4_True'.
Returns
-------
`pandas.DataFrame`
Columns are renamed (if applicable) metadata fields and rows are `Classifications.id`.
Elements are transformed values. Not all metadata fields will have been renamed, but will
be present in the below `dict` nonetheless.
`dict`
Keys are metadata fields and values are renamed metadata fields. This can be used to map
metadata fields which were passed to this function, to prettier names. For example, if
'bacteroid' is passed, it will be matched with the Bacteroides genus and renamed to
'Bacteroides (816)', which includes its taxon ID.
"""
import pandas as pd
help_metadata = ", ".join(self.metadata.keys())
magic_metadata = pd.DataFrame({"classification_id": self._results.index}).set_index(
"classification_id"
)
# if user passed label kwarg but didn't put "Label" in the fields, assume the user wants
# that field added
if label is not None and "Label" not in metadata_fields:
metadata_fields.append("Label")
# if we magically rename fields, keep track
magic_fields = {}
for f in set([f for f in metadata_fields if f]):
if isinstance(f, tuple):
# joined categorical metadata
for field in f:
if field not in self.metadata:
raise OneCodexException(
"Metric {} not found. Choose from: {}".format(field, help_metadata)
)
if not (
pd.api.types.is_bool_dtype(self.metadata[field])
or pd.api.types.is_categorical_dtype(self.metadata[field]) # noqa
or | pd.api.types.is_object_dtype(self.metadata[field]) | pandas.api.types.is_object_dtype |
import argparse
from pathlib import Path
import multiprocessing
import pandas as pd
import tqdm
from utils import read_img_cv
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--data-path', type=str, default='./data/test_tiles_1024')
parser.add_argument('--preds-path', type=str, required=True)
return parser.parse_args()
def process_img(row):
_, item = row
return item.fname, read_img_cv(item.mask_path).any()
def main():
args = parse_args()
print(args)
data_path = Path(args.data_path)
img_paths = list(data_path.glob('*.jpg'))
test = | pd.DataFrame(img_paths, columns=['fname']) | pandas.DataFrame |
import numpy as np
import pandas as pd
from pandas import (
Categorical,
DataFrame,
MultiIndex,
Series,
Timestamp,
date_range,
)
from .pandas_vb_common import tm
try:
from pandas.tseries.offsets import (
Hour,
Nano,
)
except ImportError:
# For compatibility with older versions
from pandas.core.datetools import (
Hour,
Nano,
)
class FromDicts:
def setup(self):
N, K = 5000, 50
self.index = tm.makeStringIndex(N)
self.columns = tm.makeStringIndex(K)
frame = DataFrame(np.random.randn(N, K), index=self.index, columns=self.columns)
self.data = frame.to_dict()
self.dict_list = frame.to_dict(orient="records")
self.data2 = {i: {j: float(j) for j in range(100)} for i in range(2000)}
# arrays which we wont consolidate
self.dict_of_categoricals = {i: Categorical(np.arange(N)) for i in range(K)}
def time_list_of_dict(self):
DataFrame(self.dict_list)
def time_nested_dict(self):
DataFrame(self.data)
def time_nested_dict_index(self):
DataFrame(self.data, index=self.index)
def time_nested_dict_columns(self):
| DataFrame(self.data, columns=self.columns) | pandas.DataFrame |
"""
This is a pseudo-public API for downstream libraries. We ask that downstream
authors
1) Try to avoid using internals directly altogether, and failing that,
2) Use only functions exposed here (or in core.internals)
"""
from __future__ import annotations
from collections import defaultdict
from typing import DefaultDict
import numpy as np
from pandas._libs.internals import BlockPlacement
from pandas._typing import (
ArrayLike,
Dtype,
)
from pandas.core.dtypes.common import (
is_datetime64tz_dtype,
pandas_dtype,
)
from pandas.core.arrays import DatetimeArray
from pandas.core.construction import extract_array
from pandas.core.indexes.api import Index
from pandas.core.internals.blocks import (
Block,
CategoricalBlock,
DatetimeTZBlock,
ExtensionBlock,
check_ndim,
ensure_block_shape,
extract_pandas_array,
get_block_type,
maybe_coerce_values,
new_block,
)
from pandas.core.internals.managers import (
BlockManager,
construction_error,
multi_blockify,
simple_blockify,
)
def make_block(
values, placement, klass=None, ndim=None, dtype: Dtype | None = None
) -> Block:
"""
This is a pseudo-public analogue to blocks.new_block.
We ask that downstream libraries use this rather than any fully-internal
APIs, including but not limited to:
- core.internals.blocks.make_block
- Block.make_block
- Block.make_block_same_class
- Block.__init__
"""
if dtype is not None:
dtype = pandas_dtype(dtype)
values, dtype = extract_pandas_array(values, dtype, ndim)
if klass is None:
dtype = dtype or values.dtype
klass = get_block_type(values, dtype)
elif klass is DatetimeTZBlock and not is_datetime64tz_dtype(values.dtype):
# pyarrow calls get here
values = | DatetimeArray._simple_new(values, dtype=dtype) | pandas.core.arrays.DatetimeArray._simple_new |
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import os
import json
# Feature selection strategies
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import SelectFromModel
# Scale feature scores
from sklearn.preprocessing import MinMaxScaler
# SKLearn estimators list
from sklearn.utils import all_estimators
# MLRun utils
from mlrun.mlutils.plots import gcf_clear
from mlrun.utils.helpers import create_class
from mlrun.artifacts import PlotArtifact
def show_values_on_bars(axs, h_v="v", space=0.4):
def _show_on_single_plot(ax):
if h_v == "v":
for p in ax.patches:
_x = p.get_x() + p.get_width() / 2
_y = p.get_y() + p.get_height()
value = int(p.get_height())
ax.text(_x, _y, value, ha="center")
elif h_v == "h":
for p in ax.patches:
_x = p.get_x() + p.get_width() + float(space)
_y = p.get_y() + p.get_height()
value = int(p.get_width())
ax.text(_x, _y, value, ha="left")
if isinstance(axs, np.ndarray):
for idx, ax in np.ndenumerate(axs):
_show_on_single_plot(ax)
else:
_show_on_single_plot(axs)
def plot_stat(context,
stat_name,
stat_df):
gcf_clear(plt)
# Add chart
ax = plt.axes()
stat_chart = sns.barplot(x=stat_name,
y='index',
data=stat_df.sort_values(stat_name, ascending=False).reset_index(),
ax=ax)
plt.tight_layout()
for p in stat_chart.patches:
width = p.get_width()
plt.text(5 + p.get_width(), p.get_y() + 0.55 * p.get_height(),
'{:1.2f}'.format(width),
ha='center', va='center')
context.log_artifact(PlotArtifact(f'{stat_name}', body=plt.gcf()),
local_path=os.path.join('plots', 'feature_selection', f'{stat_name}.html'))
gcf_clear(plt)
def feature_selection(context,
df_artifact,
k=2,
min_votes=0.5,
label_column: str = 'Y',
stat_filters=['f_classif', 'mutual_info_classif', 'chi2', 'f_regression'],
model_filters={'LinearSVC': 'LinearSVC',
'LogisticRegression': 'LogisticRegression',
'ExtraTreesClassifier': 'ExtraTreesClassifier'},
max_scaled_scores=True):
"""Applies selected feature selection statistical functions
or models on our 'df_artifact'.
Each statistical function or model will vote for it's best K selected features.
If a feature has >= 'min_votes' votes, it will be selected.
:param context: the function context
:param k: number of top features to select from each statistical
function or model
:param min_votes: minimal number of votes (from a model or by statistical
function) needed for a feature to be selected.
Can be specified by percentage of votes or absolute
number of votes
:param label_column: ground-truth (y) labels
:param stat_filters: statistical functions to apply to the features
(from sklearn.feature_selection)
:param model_filters: models to use for feature evaluation, can be specified by
model name (ex. LinearSVC), formalized json (contains 'CLASS',
'FIT', 'META') or a path to such json file.
:param max_scaled_scores: produce feature scores table scaled with max_scaler
"""
# Read input DF
df_path = str(df_artifact)
context.logger.info(f'input dataset {df_path}')
if df_path.endswith('csv'):
df = | pd.read_csv(df_path) | pandas.read_csv |
from sklearn.metrics import accuracy_score, precision_score, recall_score, confusion_matrix, f1_score
import numpy as np
import pandas as pd
import seaborn as sn
import matplotlib.pyplot as plt
def _store_X(X, store_X):
if store_X is False or X is None:
return None
else:
if isinstance(X, pd.DataFrame):
return X
else:
return pd.DataFrame(X)
def _clust_centers(centers):
clusters_col_names = ['Feature ' + str(x + 1) for x in range(np.shape(centers)[1])]
ct = pd.DataFrame(centers, columns = clusters_col_names)
return ct
def _prior(y_true, digits):
class_weight = None
class_count = None
if y_true is not None:
labs = pd.DataFrame(y_true)
class_weight = round(labs.value_counts(normalize=True), digits)
class_count = labs.value_counts(normalize=False)
return class_weight, class_count
def _class_pred(obj, X, X_train, y_pred, y_train, y_true, y_true_train, prob_return, average = 'binary', digits = 3):
y_pred_prob = None
acc = None
ce = None
prc = None
rcl = None
f1 = None
conf = None
class_weight = None
class_count = None
y_pred_prob_train = None
acc_train = None
ce_train = None
prc_train = None
rcl_train = None
f1_train = None
conf_train = None
class_weight_train = None
class_count_train = None
if y_pred is not None:
labs = pd.DataFrame(y_pred)
class_weight = round(labs.value_counts(normalize=True), digits)
class_count = labs.value_counts(normalize=False)
if y_true is not None:
if pd.Series(y_true).unique().shape[0] > 2:
average = 'micro'
acc = round(accuracy_score(y_true, y_pred),digits)
ce = round(1-acc,digits)
prc = round(precision_score(y_true, y_pred, average=average),digits)
rcl = round(recall_score(y_true, y_pred, average=average),digits)
f1 = round(f1_score(y_true, y_pred, average = average),digits)
conf = round(pd.DataFrame(confusion_matrix(y_true, y_pred)),digits)
elif X is not None:
y_pred = obj.predict(X)
if prob_return is True:
y_pred_prob = obj.predict_proba(X)
labs = pd.DataFrame(y_pred)
class_weight = round(labs.value_counts(normalize=True), digits)
class_count = labs.value_counts(normalize=False)
if y_true is not None:
if pd.Series(y_true).unique().shape[0] > 2:
average = 'micro'
acc = round(accuracy_score(y_true, y_pred),digits)
ce = round(1-acc,digits)
prc = round(precision_score(y_true, y_pred, average=average),digits)
rcl = round(recall_score(y_true, y_pred, average=average),digits)
f1 = round(f1_score(y_true, y_pred, average = average),digits)
conf = round(pd.DataFrame(confusion_matrix(y_true, y_pred)),digits)
if y_train is not None:
labs = pd.DataFrame(y_train)
class_weight_train = round(labs.value_counts(normalize=True),digits)
class_count_train = labs.value_counts(normalize=False)
if y_true_train is not None:
if pd.Series(y_true_train).unique().shape[0] > 2:
average = 'micro'
acc_train = round(accuracy_score(y_true_train, y_train), digits)
ce_train = round(1-acc_train, digits)
prc_train = round(precision_score(y_true_train, y_train, average=average), digits)
rcl_train = round(recall_score(y_true_train, y_train, average=average), digits)
f1_train = round(f1_score(y_true_train, y_train, average = average), digits)
conf_train = round(pd.DataFrame(confusion_matrix(y_true_train, y_train)), digits)
elif X_train is not None:
y_train = obj.predict(X_train)
if prob_return is True:
y_pred_prob_train = obj.predict_proba(X_train)
labs = pd.DataFrame(y_train)
class_weight_train = round(labs.value_counts(normalize=True),digits)
class_count_train = labs.value_counts(normalize=False)
if y_true_train is not None:
if pd.Series(y_true_train).unique().shape[0] > 2:
average = 'micro'
acc_train = round(accuracy_score(y_true_train, y_train), digits)
ce_train = round(1-acc_train, digits)
prc_train = round(precision_score(y_true_train, y_train, average=average), digits)
rcl_train = round(recall_score(y_true_train, y_train, average=average), digits)
f1_train = round(f1_score(y_true_train, y_train, average=average), digits)
conf_train = round(pd.DataFrame(confusion_matrix(y_true_train, y_train)), digits)
return y_pred, y_true, y_pred_prob, class_weight, class_count, acc, \
prc, rcl, f1, conf, y_train, y_pred_prob_train, \
class_weight_train, class_count_train, acc_train, \
prc_train, rcl_train, f1_train, conf_train, ce, ce_train, y_true_train
def _features_important(features, X):
if X is None:
ft = pd.Series(features).sort_values(ascending=False)
else:
XX = X.copy()
XX = | pd.DataFrame(XX) | pandas.DataFrame |
import pandas as pd
import numpy as np
from sklearn import linear_model
from itertools import combinations
from .stats import *
from functools import partial
import multiprocessing as mp
from tqdm import tqdm
def csRenameOrth(adQuery,adTrain,orthTable,speciesQuery='human',speciesTrain='mouse'):
_,_,cgenes=np.intersect1d(adQuery.var_names.values, orthTable[speciesQuery], return_indices=True)
_,_,ccgenes=np.intersect1d(adTrain.var_names.values, orthTable[speciesTrain], return_indices=True)
temp1=np.zeros(len(orthTable.index.values), dtype=bool)
temp2=np.zeros(len(orthTable.index.values), dtype=bool)
temp1[cgenes]=True
temp2[ccgenes]=True
common=np.logical_and(temp1, temp2)
oTab=orthTable.loc[common.T,:]
adT=adTrain[:, oTab[speciesTrain]]
adQ=adQuery[:, oTab[speciesQuery]]
adQ.var_names = adT.var_names
return [adQ, adT]
def csRenameOrth2(expQuery,expTrain,orthTable,speciesQuery='human',speciesTrain='mouse'):
_,_,cgenes=np.intersect1d(expQuery.columns.values, orthTable[speciesQuery], return_indices=True)
_,_,ccgenes=np.intersect1d(expTrain.columns.values, orthTable[speciesTrain], return_indices=True)
temp1=np.zeros(len(orthTable.index.values), dtype=bool)
temp2=np.zeros(len(orthTable.index.values), dtype=bool)
temp1[cgenes]=True
temp2[ccgenes]=True
common=np.logical_and(temp1, temp2)
oTab=orthTable.loc[common.T,:]
expT=expTrain.loc[:, oTab[speciesTrain]]
expQ=expQuery.loc[:, oTab[speciesQuery]]
expQ.columns= expT.columns
return [expQ, expT]
def makePairTab(genes):
pairs = list(combinations(genes, 2))
labels = ['genes1', 'genes2']
pTab = | pd.DataFrame(data=pairs, columns=labels) | pandas.DataFrame |
from GoogleNews import GoogleNews
import pandas as pd
import dataframe_image as dfi
from datetime import date,timedelta
class newsDataCapturing():
def getnewsData(self):
today = date.today()
T_split = str(today).split('-')
toDate = T_split[2]+'/'+T_split[1]+'/'+T_split[0]
googlenewsMkt=GoogleNews(start=toDate,end=toDate)
googlenewsMkt.get_news('Market')
result=googlenewsMkt.results()
df=pd.DataFrame(result).head(10)
dfi.export(df, './template/df_styled_Market.jpeg')
googlenewsBiz=GoogleNews(start=toDate,end=toDate)
googlenewsBiz.get_news('Business')
result=googlenewsBiz.results()
df= | pd.DataFrame(result) | pandas.DataFrame |
import sys
import numpy as np
import pandas as pd
from sqlalchemy import create_engine
import nltk
nltk.download('wordnet')
nltk.download('stopwords')
from warnings import simplefilter
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split
from sklearn.multioutput import MultiOutputClassifier
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import classification_report
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.model_selection import GridSearchCV
from sklearn.externals import joblib
stop_words = set(stopwords.words('english'))
def load_data(database_filepath):
"""Loads data from the database.
Args:
database_filepath (str): Path to the database file
Returns:
(pd.Series, pd.DataFrame, list): A tuple containing messages as a first element, categories as a second element
and a list of category names as the third element
"""
engine = create_engine('sqlite:///{}'.format(database_filepath))
df = | pd.read_sql_table('messages', con=engine) | pandas.read_sql_table |
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 22 11:00:00 2021
@author: user
"""
import pandas as pd
def pearson(x, y):
'''
Reference Paper: An introduction to variable and feature selection(Isabelle Guyon)
section 2.2
    Suitable for linear relationships.
Parameters
----------
x : m*n DataFrame
m is # of data(examples)
n is # of features
y : m*1 array
output of the model(target)
Returns
-------
    n*1 Series
        Sorted squared Pearson coefficients, one per feature
Pearson = -1~1
'''
y_term = y - y.mean()
pearson_list = []
    # compute the squared Pearson coefficient for each feature
for i in range(x.shape[1]):
x_term = x.iloc[:, i] - x.iloc[:, i].mean()
        # numerator
        numerator = (x_term * y_term).sum()
        # denominator
        denominator = ((x_term ** 2).sum() * (y_term ** 2).sum()) ** (1/2)
        # square of formula (2)
pearson_list.append((numerator / denominator)**2)
pearson = pd.Series(pearson_list, index=x.columns)
pearson.sort_values(inplace=True)
return pearson
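# Example (hypothetical data): squared Pearson scores per feature, sorted ascending.
#   x = pd.DataFrame({'f1': [1, 2, 3, 4], 'f2': [4, 3, 2, 1]})
#   y = pd.Series([1.0, 2.1, 2.9, 4.2])
#   pearson(x, y)  # both features score near 1 (strong linear relation with y)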
def spearman(x, y):
'''
Reference:
https://www.youtube.com/watch?v=Zc9sm1irUx8&ab_channel=CUSTCourses
    Suitable for non-linear (monotonic) relationships.
Parameters
----------
x : m*n DataFrame
m is # of data(examples)
n is # of features
y : m*1 array
output of the model(target)
Returns
-------
    n*1 Series
        Sorted squared Spearman coefficients, one per feature
Spearman = -1~1
'''
rank_x = x.rank()
rank_y = y.rank()
    # compute the Spearman coefficient for each feature
spearman_list = []
for i in range(rank_x.shape[1]):
d = rank_x.iloc[:, i] - rank_y
d = d**2
        # numerator
        numerator = 6 * d.sum()
        # denominator
        n = rank_x.shape[0]
        denominator = n * (n**2 - 1)
        spearman = 1 - (numerator / denominator)
        # square the original Spearman coefficient
spearman_list.append(spearman**2)
spearman = | pd.Series(spearman_list, index=x.columns) | pandas.Series |
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
lreshape,
melt,
wide_to_long,
)
import pandas._testing as tm
class TestMelt:
def setup_method(self, method):
self.df = tm.makeTimeDataFrame()[:10]
self.df["id1"] = (self.df["A"] > 0).astype(np.int64)
self.df["id2"] = (self.df["B"] > 0).astype(np.int64)
self.var_name = "var"
self.value_name = "val"
self.df1 = DataFrame(
[
[1.067683, -1.110463, 0.20867],
[-1.321405, 0.368915, -1.055342],
[-0.807333, 0.08298, -0.873361],
]
)
self.df1.columns = [list("ABC"), list("abc")]
self.df1.columns.names = ["CAP", "low"]
def test_top_level_method(self):
result = melt(self.df)
assert result.columns.tolist() == ["variable", "value"]
def test_method_signatures(self):
tm.assert_frame_equal(self.df.melt(), melt(self.df))
tm.assert_frame_equal(
self.df.melt(id_vars=["id1", "id2"], value_vars=["A", "B"]),
melt(self.df, id_vars=["id1", "id2"], value_vars=["A", "B"]),
)
tm.assert_frame_equal(
self.df.melt(var_name=self.var_name, value_name=self.value_name),
melt(self.df, var_name=self.var_name, value_name=self.value_name),
)
tm.assert_frame_equal(self.df1.melt(col_level=0), melt(self.df1, col_level=0))
def test_default_col_names(self):
result = self.df.melt()
assert result.columns.tolist() == ["variable", "value"]
result1 = self.df.melt(id_vars=["id1"])
assert result1.columns.tolist() == ["id1", "variable", "value"]
result2 = self.df.melt(id_vars=["id1", "id2"])
assert result2.columns.tolist() == ["id1", "id2", "variable", "value"]
def test_value_vars(self):
result3 = self.df.melt(id_vars=["id1", "id2"], value_vars="A")
assert len(result3) == 10
result4 = self.df.melt(id_vars=["id1", "id2"], value_vars=["A", "B"])
expected4 = DataFrame(
{
"id1": self.df["id1"].tolist() * 2,
"id2": self.df["id2"].tolist() * 2,
"variable": ["A"] * 10 + ["B"] * 10,
"value": (self.df["A"].tolist() + self.df["B"].tolist()),
},
columns=["id1", "id2", "variable", "value"],
)
tm.assert_frame_equal(result4, expected4)
def test_value_vars_types(self):
# GH 15348
expected = DataFrame(
{
"id1": self.df["id1"].tolist() * 2,
"id2": self.df["id2"].tolist() * 2,
"variable": ["A"] * 10 + ["B"] * 10,
"value": (self.df["A"].tolist() + self.df["B"].tolist()),
},
columns=["id1", "id2", "variable", "value"],
)
for type_ in (tuple, list, np.array):
result = self.df.melt(id_vars=["id1", "id2"], value_vars=type_(("A", "B")))
tm.assert_frame_equal(result, expected)
def test_vars_work_with_multiindex(self):
expected = DataFrame(
{
("A", "a"): self.df1[("A", "a")],
"CAP": ["B"] * len(self.df1),
"low": ["b"] * len(self.df1),
"value": self.df1[("B", "b")],
},
columns=[("A", "a"), "CAP", "low", "value"],
)
result = self.df1.melt(id_vars=[("A", "a")], value_vars=[("B", "b")])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"id_vars, value_vars, col_level, expected",
[
(
["A"],
["B"],
0,
DataFrame(
{
"A": {0: 1.067683, 1: -1.321405, 2: -0.807333},
"CAP": {0: "B", 1: "B", 2: "B"},
"value": {0: -1.110463, 1: 0.368915, 2: 0.08298},
}
),
),
(
["a"],
["b"],
1,
DataFrame(
{
"a": {0: 1.067683, 1: -1.321405, 2: -0.807333},
"low": {0: "b", 1: "b", 2: "b"},
"value": {0: -1.110463, 1: 0.368915, 2: 0.08298},
}
),
),
],
)
def test_single_vars_work_with_multiindex(
self, id_vars, value_vars, col_level, expected
):
result = self.df1.melt(id_vars, value_vars, col_level=col_level)
tm.assert_frame_equal(result, expected)
def test_tuple_vars_fail_with_multiindex(self):
# melt should fail with an informative error message if
# the columns have a MultiIndex and a tuple is passed
# for id_vars or value_vars.
tuple_a = ("A", "a")
list_a = [tuple_a]
tuple_b = ("B", "b")
list_b = [tuple_b]
msg = r"(id|value)_vars must be a list of tuples when columns are a MultiIndex"
for id_vars, value_vars in (
(tuple_a, list_b),
(list_a, tuple_b),
(tuple_a, tuple_b),
):
with pytest.raises(ValueError, match=msg):
self.df1.melt(id_vars=id_vars, value_vars=value_vars)
def test_custom_var_name(self):
result5 = self.df.melt(var_name=self.var_name)
assert result5.columns.tolist() == ["var", "value"]
result6 = self.df.melt(id_vars=["id1"], var_name=self.var_name)
assert result6.columns.tolist() == ["id1", "var", "value"]
result7 = self.df.melt(id_vars=["id1", "id2"], var_name=self.var_name)
assert result7.columns.tolist() == ["id1", "id2", "var", "value"]
result8 = self.df.melt(
id_vars=["id1", "id2"], value_vars="A", var_name=self.var_name
)
assert result8.columns.tolist() == ["id1", "id2", "var", "value"]
result9 = self.df.melt(
id_vars=["id1", "id2"], value_vars=["A", "B"], var_name=self.var_name
)
expected9 = DataFrame(
{
"id1": self.df["id1"].tolist() * 2,
"id2": self.df["id2"].tolist() * 2,
self.var_name: ["A"] * 10 + ["B"] * 10,
"value": (self.df["A"].tolist() + self.df["B"].tolist()),
},
columns=["id1", "id2", self.var_name, "value"],
)
tm.assert_frame_equal(result9, expected9)
def test_custom_value_name(self):
result10 = self.df.melt(value_name=self.value_name)
assert result10.columns.tolist() == ["variable", "val"]
result11 = self.df.melt(id_vars=["id1"], value_name=self.value_name)
assert result11.columns.tolist() == ["id1", "variable", "val"]
result12 = self.df.melt(id_vars=["id1", "id2"], value_name=self.value_name)
assert result12.columns.tolist() == ["id1", "id2", "variable", "val"]
result13 = self.df.melt(
id_vars=["id1", "id2"], value_vars="A", value_name=self.value_name
)
assert result13.columns.tolist() == ["id1", "id2", "variable", "val"]
result14 = self.df.melt(
id_vars=["id1", "id2"], value_vars=["A", "B"], value_name=self.value_name
)
expected14 = DataFrame(
{
"id1": self.df["id1"].tolist() * 2,
"id2": self.df["id2"].tolist() * 2,
"variable": ["A"] * 10 + ["B"] * 10,
self.value_name: (self.df["A"].tolist() + self.df["B"].tolist()),
},
columns=["id1", "id2", "variable", self.value_name],
)
tm.assert_frame_equal(result14, expected14)
def test_custom_var_and_value_name(self):
result15 = self.df.melt(var_name=self.var_name, value_name=self.value_name)
assert result15.columns.tolist() == ["var", "val"]
result16 = self.df.melt(
id_vars=["id1"], var_name=self.var_name, value_name=self.value_name
)
assert result16.columns.tolist() == ["id1", "var", "val"]
result17 = self.df.melt(
id_vars=["id1", "id2"], var_name=self.var_name, value_name=self.value_name
)
assert result17.columns.tolist() == ["id1", "id2", "var", "val"]
result18 = self.df.melt(
id_vars=["id1", "id2"],
value_vars="A",
var_name=self.var_name,
value_name=self.value_name,
)
assert result18.columns.tolist() == ["id1", "id2", "var", "val"]
result19 = self.df.melt(
id_vars=["id1", "id2"],
value_vars=["A", "B"],
var_name=self.var_name,
value_name=self.value_name,
)
expected19 = DataFrame(
{
"id1": self.df["id1"].tolist() * 2,
"id2": self.df["id2"].tolist() * 2,
self.var_name: ["A"] * 10 + ["B"] * 10,
self.value_name: (self.df["A"].tolist() + self.df["B"].tolist()),
},
columns=["id1", "id2", self.var_name, self.value_name],
)
tm.assert_frame_equal(result19, expected19)
df20 = self.df.copy()
df20.columns.name = "foo"
result20 = df20.melt()
assert result20.columns.tolist() == ["foo", "value"]
def test_col_level(self):
res1 = self.df1.melt(col_level=0)
res2 = self.df1.melt(col_level="CAP")
assert res1.columns.tolist() == ["CAP", "value"]
assert res2.columns.tolist() == ["CAP", "value"]
def test_multiindex(self):
res = self.df1.melt()
assert res.columns.tolist() == ["CAP", "low", "value"]
@pytest.mark.parametrize(
"col",
[
pd.Series(pd.date_range("2010", periods=5, tz="US/Pacific")),
pd.Series(["a", "b", "c", "a", "d"], dtype="category"),
pd.Series([0, 1, 0, 0, 0]),
],
)
def test_pandas_dtypes(self, col):
# GH 15785
df = DataFrame(
{"klass": range(5), "col": col, "attr1": [1, 0, 0, 0, 0], "attr2": col}
)
expected_value = pd.concat([pd.Series([1, 0, 0, 0, 0]), col], ignore_index=True)
result = melt(
df, id_vars=["klass", "col"], var_name="attribute", value_name="value"
)
expected = DataFrame(
{
0: list(range(5)) * 2,
1: pd.concat([col] * 2, ignore_index=True),
2: ["attr1"] * 5 + ["attr2"] * 5,
3: expected_value,
}
)
expected.columns = ["klass", "col", "attribute", "value"]
tm.assert_frame_equal(result, expected)
def test_preserve_category(self):
# GH 15853
data = DataFrame({"A": [1, 2], "B": pd.Categorical(["X", "Y"])})
result = melt(data, ["B"], ["A"])
expected = DataFrame(
{"B": pd.Categorical(["X", "Y"]), "variable": ["A", "A"], "value": [1, 2]}
)
tm.assert_frame_equal(result, expected)
def test_melt_missing_columns_raises(self):
# GH-23575
# This test is to ensure that pandas raises an error if melting is
# attempted with column names absent from the dataframe
# Generate data
df = DataFrame(np.random.randn(5, 4), columns=list("abcd"))
# Try to melt with missing `value_vars` column name
msg = "The following '{Var}' are not present in the DataFrame: {Col}"
with pytest.raises(
KeyError, match=msg.format(Var="value_vars", Col="\\['C'\\]")
):
df.melt(["a", "b"], ["C", "d"])
# Try to melt with missing `id_vars` column name
with pytest.raises(KeyError, match=msg.format(Var="id_vars", Col="\\['A'\\]")):
df.melt(["A", "b"], ["c", "d"])
# Multiple missing
with pytest.raises(
KeyError,
match=msg.format(Var="id_vars", Col="\\['not_here', 'or_there'\\]"),
):
df.melt(["a", "b", "not_here", "or_there"], ["c", "d"])
# Multiindex melt fails if column is missing from multilevel melt
multi = df.copy()
multi.columns = [list("ABCD"), list("abcd")]
with pytest.raises(KeyError, match=msg.format(Var="id_vars", Col="\\['E'\\]")):
multi.melt([("E", "a")], [("B", "b")])
# Multiindex fails if column is missing from single level melt
with pytest.raises(
KeyError, match=msg.format(Var="value_vars", Col="\\['F'\\]")
):
multi.melt(["A"], ["F"], col_level=0)
def test_melt_mixed_int_str_id_vars(self):
# GH 29718
df = DataFrame({0: ["foo"], "a": ["bar"], "b": [1], "d": [2]})
result = melt(df, id_vars=[0, "a"], value_vars=["b", "d"])
expected = DataFrame(
{0: ["foo"] * 2, "a": ["bar"] * 2, "variable": list("bd"), "value": [1, 2]}
)
tm.assert_frame_equal(result, expected)
def test_melt_mixed_int_str_value_vars(self):
# GH 29718
df = DataFrame({0: ["foo"], "a": ["bar"]})
result = melt(df, value_vars=[0, "a"])
expected = DataFrame({"variable": [0, "a"], "value": ["foo", "bar"]})
tm.assert_frame_equal(result, expected)
def test_ignore_index(self):
# GH 17440
df = DataFrame({"foo": [0], "bar": [1]}, index=["first"])
result = melt(df, ignore_index=False)
expected = DataFrame(
{"variable": ["foo", "bar"], "value": [0, 1]}, index=["first", "first"]
)
tm.assert_frame_equal(result, expected)
def test_ignore_multiindex(self):
# GH 17440
index = pd.MultiIndex.from_tuples(
[("first", "second"), ("first", "third")], names=["baz", "foobar"]
)
df = DataFrame({"foo": [0, 1], "bar": [2, 3]}, index=index)
result = melt(df, ignore_index=False)
expected_index = pd.MultiIndex.from_tuples(
[("first", "second"), ("first", "third")] * 2, names=["baz", "foobar"]
)
expected = DataFrame(
{"variable": ["foo"] * 2 + ["bar"] * 2, "value": [0, 1, 2, 3]},
index=expected_index,
)
tm.assert_frame_equal(result, expected)
def test_ignore_index_name_and_type(self):
# GH 17440
index = pd.Index(["foo", "bar"], dtype="category", name="baz")
df = DataFrame({"x": [0, 1], "y": [2, 3]}, index=index)
result = melt(df, ignore_index=False)
expected_index = pd.Index(["foo", "bar"] * 2, dtype="category", name="baz")
expected = DataFrame(
{"variable": ["x", "x", "y", "y"], "value": [0, 1, 2, 3]},
index=expected_index,
)
tm.assert_frame_equal(result, expected)
def test_melt_with_duplicate_columns(self):
# GH#41951
df = DataFrame([["id", 2, 3]], columns=["a", "b", "b"])
result = df.melt(id_vars=["a"], value_vars=["b"])
expected = DataFrame(
[["id", "b", 2], ["id", "b", 3]], columns=["a", "variable", "value"]
)
tm.assert_frame_equal(result, expected)
class TestLreshape:
def test_pairs(self):
data = {
"birthdt": [
"08jan2009",
"20dec2008",
"30dec2008",
"21dec2008",
"11jan2009",
],
"birthwt": [1766, 3301, 1454, 3139, 4133],
"id": [101, 102, 103, 104, 105],
"sex": ["Male", "Female", "Female", "Female", "Female"],
"visitdt1": [
"11jan2009",
"22dec2008",
"04jan2009",
"29dec2008",
"20jan2009",
],
"visitdt2": ["21jan2009", np.nan, "22jan2009", "31dec2008", "03feb2009"],
"visitdt3": ["05feb2009", np.nan, np.nan, "02jan2009", "15feb2009"],
"wt1": [1823, 3338, 1549, 3298, 4306],
"wt2": [2011.0, np.nan, 1892.0, 3338.0, 4575.0],
"wt3": [2293.0, np.nan, np.nan, 3377.0, 4805.0],
}
df = DataFrame(data)
spec = {
"visitdt": [f"visitdt{i:d}" for i in range(1, 4)],
"wt": [f"wt{i:d}" for i in range(1, 4)],
}
result = lreshape(df, spec)
exp_data = {
"birthdt": [
"08jan2009",
"20dec2008",
"30dec2008",
"21dec2008",
"11jan2009",
"08jan2009",
"30dec2008",
"21dec2008",
"11jan2009",
"08jan2009",
"21dec2008",
"11jan2009",
],
"birthwt": [
1766,
3301,
1454,
3139,
4133,
1766,
1454,
3139,
4133,
1766,
3139,
4133,
],
"id": [101, 102, 103, 104, 105, 101, 103, 104, 105, 101, 104, 105],
"sex": [
"Male",
"Female",
"Female",
"Female",
"Female",
"Male",
"Female",
"Female",
"Female",
"Male",
"Female",
"Female",
],
"visitdt": [
"11jan2009",
"22dec2008",
"04jan2009",
"29dec2008",
"20jan2009",
"21jan2009",
"22jan2009",
"31dec2008",
"03feb2009",
"05feb2009",
"02jan2009",
"15feb2009",
],
"wt": [
1823.0,
3338.0,
1549.0,
3298.0,
4306.0,
2011.0,
1892.0,
3338.0,
4575.0,
2293.0,
3377.0,
4805.0,
],
}
exp = DataFrame(exp_data, columns=result.columns)
tm.assert_frame_equal(result, exp)
result = lreshape(df, spec, dropna=False)
exp_data = {
"birthdt": [
"08jan2009",
"20dec2008",
"30dec2008",
"21dec2008",
"11jan2009",
"08jan2009",
"20dec2008",
"30dec2008",
"21dec2008",
"11jan2009",
"08jan2009",
"20dec2008",
"30dec2008",
"21dec2008",
"11jan2009",
],
"birthwt": [
1766,
3301,
1454,
3139,
4133,
1766,
3301,
1454,
3139,
4133,
1766,
3301,
1454,
3139,
4133,
],
"id": [
101,
102,
103,
104,
105,
101,
102,
103,
104,
105,
101,
102,
103,
104,
105,
],
"sex": [
"Male",
"Female",
"Female",
"Female",
"Female",
"Male",
"Female",
"Female",
"Female",
"Female",
"Male",
"Female",
"Female",
"Female",
"Female",
],
"visitdt": [
"11jan2009",
"22dec2008",
"04jan2009",
"29dec2008",
"20jan2009",
"21jan2009",
np.nan,
"22jan2009",
"31dec2008",
"03feb2009",
"05feb2009",
np.nan,
np.nan,
"02jan2009",
"15feb2009",
],
"wt": [
1823.0,
3338.0,
1549.0,
3298.0,
4306.0,
2011.0,
np.nan,
1892.0,
3338.0,
4575.0,
2293.0,
np.nan,
np.nan,
3377.0,
4805.0,
],
}
exp = DataFrame(exp_data, columns=result.columns)
tm.assert_frame_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = lreshape(df, spec, dropna=False, label="foo")
spec = {
"visitdt": [f"visitdt{i:d}" for i in range(1, 3)],
"wt": [f"wt{i:d}" for i in range(1, 4)],
}
msg = "All column lists must be same length"
with pytest.raises(ValueError, match=msg):
lreshape(df, spec)
class TestWideToLong:
def test_simple(self):
np.random.seed(123)
x = np.random.randn(3)
df = DataFrame(
{
"A1970": {0: "a", 1: "b", 2: "c"},
"A1980": {0: "d", 1: "e", 2: "f"},
"B1970": {0: 2.5, 1: 1.2, 2: 0.7},
"B1980": {0: 3.2, 1: 1.3, 2: 0.1},
"X": dict(zip(range(3), x)),
}
)
df["id"] = df.index
exp_data = {
"X": x.tolist() + x.tolist(),
"A": ["a", "b", "c", "d", "e", "f"],
"B": [2.5, 1.2, 0.7, 3.2, 1.3, 0.1],
"year": [1970, 1970, 1970, 1980, 1980, 1980],
"id": [0, 1, 2, 0, 1, 2],
}
expected = DataFrame(exp_data)
expected = expected.set_index(["id", "year"])[["X", "A", "B"]]
result = wide_to_long(df, ["A", "B"], i="id", j="year")
tm.assert_frame_equal(result, expected)
def test_stubs(self):
# GH9204 wide_to_long call should not modify 'stubs' list
df = DataFrame([[0, 1, 2, 3, 8], [4, 5, 6, 7, 9]])
df.columns = ["id", "inc1", "inc2", "edu1", "edu2"]
stubs = ["inc", "edu"]
wide_to_long(df, stubs, i="id", j="age")
assert stubs == ["inc", "edu"]
def test_separating_character(self):
# GH14779
np.random.seed(123)
x = np.random.randn(3)
df = DataFrame(
{
"A.1970": {0: "a", 1: "b", 2: "c"},
"A.1980": {0: "d", 1: "e", 2: "f"},
"B.1970": {0: 2.5, 1: 1.2, 2: 0.7},
"B.1980": {0: 3.2, 1: 1.3, 2: 0.1},
"X": dict(zip(range(3), x)),
}
)
df["id"] = df.index
exp_data = {
"X": x.tolist() + x.tolist(),
"A": ["a", "b", "c", "d", "e", "f"],
"B": [2.5, 1.2, 0.7, 3.2, 1.3, 0.1],
"year": [1970, 1970, 1970, 1980, 1980, 1980],
"id": [0, 1, 2, 0, 1, 2],
}
expected = DataFrame(exp_data)
expected = expected.set_index(["id", "year"])[["X", "A", "B"]]
result = wide_to_long(df, ["A", "B"], i="id", j="year", sep=".")
tm.assert_frame_equal(result, expected)
def test_escapable_characters(self):
np.random.seed(123)
x = np.random.randn(3)
df = DataFrame(
{
"A(quarterly)1970": {0: "a", 1: "b", 2: "c"},
"A(quarterly)1980": {0: "d", 1: "e", 2: "f"},
"B(quarterly)1970": {0: 2.5, 1: 1.2, 2: 0.7},
"B(quarterly)1980": {0: 3.2, 1: 1.3, 2: 0.1},
"X": dict(zip(range(3), x)),
}
)
df["id"] = df.index
exp_data = {
"X": x.tolist() + x.tolist(),
"A(quarterly)": ["a", "b", "c", "d", "e", "f"],
"B(quarterly)": [2.5, 1.2, 0.7, 3.2, 1.3, 0.1],
"year": [1970, 1970, 1970, 1980, 1980, 1980],
"id": [0, 1, 2, 0, 1, 2],
}
expected = DataFrame(exp_data)
expected = expected.set_index(["id", "year"])[
["X", "A(quarterly)", "B(quarterly)"]
]
result = wide_to_long(df, ["A(quarterly)", "B(quarterly)"], i="id", j="year")
tm.assert_frame_equal(result, expected)
def test_unbalanced(self):
# test that we can have a varying amount of time variables
df = DataFrame(
{
"A2010": [1.0, 2.0],
"A2011": [3.0, 4.0],
"B2010": [5.0, 6.0],
"X": ["X1", "X2"],
}
)
df["id"] = df.index
exp_data = {
"X": ["X1", "X2", "X1", "X2"],
"A": [1.0, 2.0, 3.0, 4.0],
"B": [5.0, 6.0, np.nan, np.nan],
"id": [0, 1, 0, 1],
"year": [2010, 2010, 2011, 2011],
}
expected = DataFrame(exp_data)
expected = expected.set_index(["id", "year"])[["X", "A", "B"]]
result = wide_to_long(df, ["A", "B"], i="id", j="year")
tm.assert_frame_equal(result, expected)
def test_character_overlap(self):
# Test we handle overlapping characters in both id_vars and value_vars
df = DataFrame(
{
"A11": ["a11", "a22", "a33"],
"A12": ["a21", "a22", "a23"],
"B11": ["b11", "b12", "b13"],
"B12": ["b21", "b22", "b23"],
"BB11": [1, 2, 3],
"BB12": [4, 5, 6],
"BBBX": [91, 92, 93],
"BBBZ": [91, 92, 93],
}
)
df["id"] = df.index
expected = DataFrame(
{
"BBBX": [91, 92, 93, 91, 92, 93],
"BBBZ": [91, 92, 93, 91, 92, 93],
"A": ["a11", "a22", "a33", "a21", "a22", "a23"],
"B": ["b11", "b12", "b13", "b21", "b22", "b23"],
"BB": [1, 2, 3, 4, 5, 6],
"id": [0, 1, 2, 0, 1, 2],
"year": [11, 11, 11, 12, 12, 12],
}
)
expected = expected.set_index(["id", "year"])[["BBBX", "BBBZ", "A", "B", "BB"]]
result = wide_to_long(df, ["A", "B", "BB"], i="id", j="year")
tm.assert_frame_equal(result.sort_index(axis=1), expected.sort_index(axis=1))
def test_invalid_separator(self):
# if an invalid separator is supplied a empty data frame is returned
sep = "nope!"
df = DataFrame(
{
"A2010": [1.0, 2.0],
"A2011": [3.0, 4.0],
"B2010": [5.0, 6.0],
"X": ["X1", "X2"],
}
)
df["id"] = df.index
exp_data = {
"X": "",
"A2010": [],
"A2011": [],
"B2010": [],
"id": [],
"year": [],
"A": [],
"B": [],
}
expected = DataFrame(exp_data).astype({"year": "int"})
expected = expected.set_index(["id", "year"])[
["X", "A2010", "A2011", "B2010", "A", "B"]
]
expected.index = expected.index.set_levels([0, 1], level=0)
result = wide_to_long(df, ["A", "B"], i="id", j="year", sep=sep)
tm.assert_frame_equal(result.sort_index(axis=1), expected.sort_index(axis=1))
def test_num_string_disambiguation(self):
# Test that we can disambiguate number value_vars from
# string value_vars
df = DataFrame(
{
"A11": ["a11", "a22", "a33"],
"A12": ["a21", "a22", "a23"],
"B11": ["b11", "b12", "b13"],
"B12": ["b21", "b22", "b23"],
"BB11": [1, 2, 3],
"BB12": [4, 5, 6],
"Arating": [91, 92, 93],
"Arating_old": [91, 92, 93],
}
)
df["id"] = df.index
expected = DataFrame(
{
"Arating": [91, 92, 93, 91, 92, 93],
"Arating_old": [91, 92, 93, 91, 92, 93],
"A": ["a11", "a22", "a33", "a21", "a22", "a23"],
"B": ["b11", "b12", "b13", "b21", "b22", "b23"],
"BB": [1, 2, 3, 4, 5, 6],
"id": [0, 1, 2, 0, 1, 2],
"year": [11, 11, 11, 12, 12, 12],
}
)
expected = expected.set_index(["id", "year"])[
["Arating", "Arating_old", "A", "B", "BB"]
]
result = wide_to_long(df, ["A", "B", "BB"], i="id", j="year")
tm.assert_frame_equal(result.sort_index(axis=1), expected.sort_index(axis=1))
def test_invalid_suffixtype(self):
# If all stubs names end with a string, but a numeric suffix is
# assumed, an empty data frame is returned
df = DataFrame(
{
"Aone": [1.0, 2.0],
"Atwo": [3.0, 4.0],
"Bone": [5.0, 6.0],
"X": ["X1", "X2"],
}
)
df["id"] = df.index
exp_data = {
"X": "",
"Aone": [],
"Atwo": [],
"Bone": [],
"id": [],
"year": [],
"A": [],
"B": [],
}
expected = | DataFrame(exp_data) | pandas.DataFrame |
import numpy as np
import pandas as pd
from dateutil.parser import parse
def index_ts_mapper(start, interval, timestamp):
"""
    takes a time series index (timestamp) and returns the integer index in the model
"""
if isinstance(start, (int, np.integer)):
return int((timestamp-start)/(interval))
elif isinstance(start, (pd.Timestamp)):
return int((timestamp.value-start.value)/(interval*10**9))
else:
raise Exception('start value for the mapper must either be integers or pd.timestamp')
def index_ts_inv_mapper(start, interval, index):
"""
    takes an integer index in the model (index) and returns the time series index
"""
if isinstance(start, (int, np.integer)):
return int((index *interval) + start)
elif isinstance(start, (pd.Timestamp)):
return pd.to_datetime(float(index*(interval*10**9)+start.tz_localize(None).value))
else:
raise Exception('start value for the inv_mapper must either be integers or pd.timestamp')
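# Editor's sketch (not part of the original module): for a fixed start and
# interval the two mappers invert each other. The integer start/interval used
# here are made-up example values.
def _example_mapper_round_trip():
    start, interval = 1000, 10
    index = index_ts_mapper(start, interval, 1250)          # -> 25
    restored = index_ts_inv_mapper(start, interval, index)  # -> 1250
    assert restored == 1250
    return index, restored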
def index_exists(interface, index_name):
    """
    :return: True if the metadata table for the given index already exists.
    """
return interface.table_exists(index_name+'_meta')
def get_bound_time(interface, time_series_table, time_column, extreme='min'):
    min_ = interface.get_extreme_value(time_series_table, time_column, extreme)
if isinstance(min_, (int, np.integer)): return min_
else:
min_ = parse(min_)
return | pd.to_datetime(min_) | pandas.to_datetime |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 4 06:03:28 2019
@author: tanujsinghal
"""
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
import re
import copy
from sklearn.metrics import jaccard_similarity_score, confusion_matrix
from sklearn.externals import joblib
import os
import pathlib
import pickle
if __name__ == '__main__':
hin=pd.read_csv("Hinglish_Profanity_List.csv")
train = pd.read_csv("train.csv")
test = pd.read_csv("test.csv")
columns = ['obscene','insult','toxic','severe_toxic','identity_hate','threat']
hin_bad_words = hin.iloc[:,0].values.tolist()
bad_words_to_english = hin.iloc[:,1].values.tolist()
hin = hin.iloc[:,:-1].values.tolist()
train, test = train_test_split(train, test_size=0.2)
labels = train.iloc[:,2:]
train_data = train.iloc[:,1]
test_data = test.iloc[:,1]
features = 5000
ngram = (1,2)
    vectorizer = TfidfVectorizer(stop_words='english',
                                 token_pattern=r"\w*[a-z]\w*",
                                 ngram_range=ngram,
                                 max_features=features)
train_features = vectorizer.fit_transform(train_data)
filename='vect'
pickle.dump(vectorizer, open(filename, 'wb'))
test_features = vectorizer.transform(test_data)
logreg = LogisticRegression(C=10,solver="liblinear")
models={}
logistic_results = pd.DataFrame(columns=columns)
cnt=0
for i in columns:
y = train[i]
models[i]=copy.copy(logreg.fit(train_features, y))
filename = "model_"+ str(cnt)
pickle.dump(models[i], open(filename, 'wb'))
ypred_X = logreg.predict(train_features)
testy_prob = logreg.predict_proba(test_features)[:,1]
logistic_results[i] = testy_prob
cnt+=1
def abusive_hinglish_to_english(data):
hin=pd.read_csv("Hinglish_Profanity_List.csv")
hin_bad_words = hin.iloc[:,0].values.tolist()
bad_words_to_english = hin.iloc[:,1].values.tolist()
hin = hin.iloc[:,:-1].values.tolist()
cnt=0
for sentence in data:
wordList = sentence.split()
for word in hin_bad_words:
if word in wordList:
x=wordList.index(word)
wordList[x]=bad_words_to_english[hin_bad_words.index(word)]
sentence = ' '.join(wordList)
data[cnt]=sentence
cnt+=1
return data
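# Editor's sketch (not in the original script): the translator expects a list
# of strings and rewrites, in place, any word found in the Hinglish profanity
# list, leaving everything else untouched. It assumes Hinglish_Profanity_List.csv
# is present in the working directory, as elsewhere in this script; the input
# below is a made-up example.
def _example_translate_comments():
    comments = ["this sentence has no flagged words", "another plain comment"]
    return abusive_hinglish_to_english(comments)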
def check_dir(directory):
if not os.path.exists(directory):
os.makedirs(directory)
def myinput(vectorizer,model,val):
sent="Thank you for understanding. I think very highly of you and would not revert without discussion."
sent2="Yo bitch Ja Rule is more succesful then you'll ever be whats up with you and hating you sad mofuckas...i should bitch slap ur pethedic white faces and get you to kiss my ass you guys sicken me. Ja rule is about pride in da music man. dont diss that shit on him. and nothin is wrong bein like tupac he was a brother too...fuckin white boys get things right next time.,"
sen3="Explanation Why the edits made under my username Hardcore Metallica Fan were reverted? They weren't vandalisms, just closure on some GAs after I voted at New York Dolls FAC. And please don't remove the template from the talk page since I'm retired now.172.16.17.32"
sen4="COCKSUCKER BEFORE YOU PISS AROUND ON MY WORK"
sen5="While booking during rush hour, it is always advisable to check this box ON. It will deduct your FULL ticket amount first. After that it check whether confirmed ticket available or not. If confirmed ticket not available it will show ticket not available. Within 2 days will get your full amount Refund."
sen6="<NAME>"
sent7='F**K YOU!good day'
l=[sent,sent2,sen3,sen4,sen5,sen6,sent7]
l=abusive_hinglish_to_english(l)
df = pd.DataFrame(l)
user_data = vectorizer.transform(l)
results2 = pd.DataFrame(columns=columns)
for i in columns:
user_results = models[i].predict_proba(user_data)[:,1]
results2[i] = user_results
y=results2.iloc[val].values
x = columns
plt.ylim(0, 100)
plt.tight_layout()
plt.bar(x, height= y)
plt.show()
plt.savefig('foo.png')
return df,results2
def myinput_network(text):
columns = ['obscene','insult','toxic','severe_toxic','identity_hate','threat']
sent="Thank you for understanding. I think very highly of you and would not revert without discussion."
sent2="Yo bitch Ja Rule is more succesful then you'll ever be whats up with you and hating you sad mofuckas...i should bitch slap ur pethedic white faces and get you to kiss my ass you guys sicken me. Ja rule is about pride in da music man. dont diss that shit on him. and nothin is wrong bein like tupac he was a brother too...fuckin white boys get things right next time.,"
sen3="Explanation Why the edits made under my username Hardcore Metallica Fan were reverted? They weren't vandalisms, just closure on some GAs after I voted at New York Dolls FAC. And please don't remove the template from the talk page since I'm retired now.172.16.17.32"
sen4="COCKSUCKER BEFORE YOU PISS AROUND ON MY WORK"
sen5="While booking during rush hour, it is always advisable to check this box ON. It will deduct your FULL ticket amount first. After that it check whether confirmed ticket available or not. If confirmed ticket not available it will show ticket not available. Within 2 days will get your full amount Refund."
sen6="<NAME>"
sent7='F**K YOU!good day'
l=[sent,sent2,sen3,sen4,sen5,sen6,sent7,text]
l=[text,sent2]
if len(text)>1 and type(text) is list:
l=text
res,x=myinput_network2(l)
return res,x
l=abusive_hinglish_to_english(l)
df = pd.DataFrame(l)
f='vect'
vect= pickle.load(open(f, 'rb'))
user_data = vect.transform(l)
results2 = pd.DataFrame(columns=columns)
cnt=0
mymodels={}
for i in range(6):
filename='model_'+str(i)
mymodels[columns[i]]= pickle.load(open(filename, 'rb'))
for i in range(6):
user_results = mymodels[columns[i]].predict_proba(user_data)[:,1]
results2[columns[i]] = user_results
x = columns
return results2.iloc[0].values,x
def myinput_network2(text):
columns = ['obscene','insult','toxic','severe_toxic','identity_hate','threat']
x = columns
if len(text)>1:
l=text
print(l)
print(type(l))
l=abusive_hinglish_to_english(l)
df = pd.DataFrame(l)
f='vect'
vect= pickle.load(open(f, 'rb'))
user_data = vect.transform(l)
results2 = | pd.DataFrame(columns=columns) | pandas.DataFrame |
from pathlib import Path
from unicodedata import normalize
import sys
from itertools import chain
sys.path.append("c:\\Users\\kpdav\\machine_learning\\projects\\PGA-portfolio-optimizer\\config")
import config
import requests
from bs4 import BeautifulSoup
import pandas as pd
def pgatour_tournament_ids(url):
"""Find pgatour.com tournament ids on webpage
Args:
url (str) : pgatour.com stats webpage
Returns:
        tournament_ids, with entries of (tournament_id, tournament_name, season)
"""
page = requests.get(url)
soup = BeautifulSoup(page.content, "lxml")
season = int(url[url.rfind("y") + 1:url.rfind(".")])
headers = soup.find("section", class_="statistics-details-content")
tournaments_info = []
if headers is not None:
tournament_parent = headers.find_all("div",
class_="statistics-details-select-wrap tournament")
if tournament_parent is not None:
tournaments = tournament_parent[0].find_all("option")
for tourn in tournaments:
tournaments_info.append((tourn["value"], tourn.text, season))
return tournaments_info
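# Editor's sketch (not in the original script): fetching the ids for a single
# season. The URL follows the same stat.328 pattern used in get_pgatour_ids();
# the season year is an arbitrary example.
def _example_single_season_ids():
    url = "https://www.pgatour.com/content/pgatour/stats/stat.328.y2019.html"
    return pgatour_tournament_ids(url)  # list of (tournament_id, name, season)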
def get_pgatour_ids(start, end):
"""Get pgatour.com tournament ids for a range of seasons
Args:
start (int) : start season
end (int): end season
Returns:
pgatour tournament ids
"""
seasons = [year for year in range(start, end+1)]
# same tournament id's for all stat pages on pgatour.com
urls = [f"https://www.pgatour.com/content/pgatour/stats/stat.328.y{season}.html"
for season in seasons]
pgatour_ids = [pgatour_tournament_ids(url) for url in urls]
# flatten data
pgatour_ids = list(chain.from_iterable(pgatour_ids))
return pgatour_ids
def save_pgatour_ids(file_p):
"""Save pgatour tournament ids in given file path
Args:
file_p (str) : file path
"""
pgatour_data = get_pgatour_ids(2017, 2020)
df = pd.DataFrame(pgatour_data, columns=["tournament_id", "tournament_name", "season"])
df.to_csv(file_p, index=False)
def get_espn_tournaments(start, end=None, all_tournaments=False):
"""Get espn tournaments for given season(s).
Notes:
if all_tournaments is left as False, the dataframe of tournaments
        will contain only valid tournaments. Otherwise tournaments that have
not been cancelled will be given (this includes tournaments of match play,
charity events, etc.)
Args:
start (int) : starting pga season
end (int) : ending pga season, optional
all_tournaments (bool) : get all or valid tournaments
Returns:
dataframe of tournaments for specified season(s)
"""
if all_tournaments:
pass
else:
# change path to point to 2011 to 2016 espn tournaments
valid_tournaments_path = str(Path(config.TOURNAMENTS_DIR, "valid_espn_tournaments_2011_2016.csv"))
        df = pd.read_csv(valid_tournaments_path, parse_dates=["date"])
if end is not None:
season_df = df[(df.season_id >= start) & (df.season_id <= end)]
else:
season_df = df[df.season_id == start]
return season_df
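# Editor's sketch (not in the original script): pulling the valid ESPN
# tournaments for the 2011-2013 seasons. Assumes the CSV referenced inside
# get_espn_tournaments() exists under config.TOURNAMENTS_DIR.
def _example_espn_tournaments():
    tournaments = get_espn_tournaments(2011, end=2013)
    return tournaments.head()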
def pgatour_statistic(url):
"""Get data on give pgatour statistic
Args:
url (str) : pgatour statistic url
Returns:
data of pgatour statistic
"""
with requests.Session() as session:
page = session.get(url)
soup = BeautifulSoup(page.content, "lxml")
print(f"Fetching: {url}")
data = []
data_keys = []
# meta-information
pga_stat_id = url[url.rfind("stat.") + 5 : url.rfind(".y")]
pga_tourn_id = url[url.rfind("off.") + 4 : url.rfind(".")]
pga_season_id = url[url.rfind("y") + 1: url.rfind(".eoff")]
statistic_name = soup.select("section.statistics-details-content")
if statistic_name is not None:
name_header = statistic_name[0].find("div", class_="header")
name = name_header.find("h1")
if name is not None:
pga_stat_name = name.text
pga_stat_name = pga_stat_name.replace(" ", "_")
statistic_table = soup.select("div.details-table-wrap")
if statistic_table is not None:
header = statistic_table[0].find("thead")
if header is not None:
header_cols = header.find_all("th")
for h_col in header_cols:
col_str = h_col.text
col_str = normalize('NFKD',col_str)
col_str = col_str.strip()
col_str = col_str.replace(" ", "_")
data_keys.append(col_str)
body = statistic_table[0].find("tbody")
if body is not None:
players = body.find_all("tr")
for player in players:
p_data = player.find_all("td")
player_dict = {}
player_dict["pga_stat_name"] = pga_stat_name
player_dict["pga_stat_id"] = pga_stat_id
player_dict["pga_tourn_id"] = pga_tourn_id
player_dict["pga_season_id"] = pga_season_id
key_counter = 0
for col in p_data:
player_dict[data_keys[key_counter]] = col.text.strip()
key_counter += 1
data.append(player_dict)
return data
def get_pgatour_statistic(url, start, end=None):
"""Get pgatour statistic over given range of season(s)
Args:
url (str) : base url for pga statistic
start (int) : start season
end (int) : end season
Returns:
dataframe of pgatour statistic over given range of season(s)
"""
front_url = url[:url.rfind("html")]
end_url = url[url.rfind("."): ]
if end is not None:
base_urls = [front_url + "y" + str(season) for season in range(start, end+1)]
pgatour_tournaments_path = str(Path(config.RAW_DATA_DIR, "PGATOUR_tournament_ids_2017_2020.csv"))
pgatour_ids = pd.read_csv(pgatour_tournaments_path)
pgatour_stat_urls = []
for url in base_urls:
season_id = int(url[url.rfind("y")+1:])
tournament_id_list = pgatour_ids["tournament_id"][pgatour_ids["season"] == season_id]
for t_id in tournament_id_list:
stat_url = url + ".eoff." + t_id + end_url
# print(stat_url)
pgatour_stat_urls.append(stat_url)
# tournament_id_list = pgatour_ids["tournament_id"][(pgatour_ids["season"] <= end) & (pgatour_ids["season"] >= start)].tolist()
stat_data = [pgatour_statistic(url) for url in pgatour_stat_urls]
# flatten data
stat_data = list(chain.from_iterable(stat_data))
df = | pd.DataFrame(stat_data) | pandas.DataFrame |
from geographiclib.geodesic import Geodesic
from LatLon23 import string2latlon, LatLon
from geomag import declination
import pandas as pd
from openpyxl import load_workbook
import numpy as np
import config
import pynmea2
import subprocess
import os
import xlrd
def Vector2Polar(N,E):
Mag = np.sqrt(N**2 + E**2)
Az = 90 - np.arctan2(N, E) * 180/np.pi
if Az <= 0:
Az = Az + 360
return(Az, Mag)
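# Editor's sketch: equal north and east components should give a 45 degree
# azimuth and a magnitude of sqrt(50) ~= 7.07. Values are illustrative only.
def _example_vector2polar():
    az, mag = Vector2Polar(5.0, 5.0)   # -> (45.0, 7.071...)
    return az, mag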
def bullcalculate(Lat, Long):
if config.becoord is None:
bull = ''
else:
if len(Lat)>0 and len(Long)>0:
try:
coord1 = config.becoord
coord2 = string2latlon(Lat, Long, 'H% %d% %M')
rbg = Geodesic.WGS84.Inverse(float(coord1.lat), float(coord1.lon), float(coord2.lat), float(coord2.lon))
bulldist = round(rbg['s12']*0.000539957) # meters to NM
bullaz = round(rbg['azi1'] - declination(float(coord1.lat), float(coord1.lon)))
if bullaz < 0:
bullaz = 360 + bullaz
bull = str(bullaz) + '/' + str(bulldist)
except:
bull = ''
else:
bull = ''
return bull
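# Editor's sketch (hypothetical coordinates): bullcalculate() expects latitude
# and longitude strings in the 'H% %d% %M' layout used by string2latlon, i.e.
# hemisphere, whole degrees and decimal minutes separated by spaces. It returns
# an empty string until config.becoord has been set elsewhere in the app.
def _example_bullcalculate():
    return bullcalculate("N 35 30.5", "W 117 15.0")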
def append_df_to_excel(filename, df, sheet_name, startrow=None,
                       truncate_sheet=False, **to_excel_kwargs):
    """
    Append a DataFrame [df] to an existing Excel file [filename]
    into [sheet_name] sheet.
    If [filename] doesn't exist, this function will create it.
    Parameters:
      filename : File path or existing ExcelWriter
                 (Example: '/path/to/file.xlsx')
      df : DataFrame to save to the workbook
      sheet_name : Name of the sheet which will contain the DataFrame
      startrow : upper left cell row to dump the data frame.
                 By default (startrow=None) the last row of the existing
                 sheet is found and the frame is written on the next row.
      truncate_sheet : truncate (remove and recreate) [sheet_name]
                       before writing the DataFrame to the Excel file
      to_excel_kwargs : extra keyword arguments passed through to
                        `DataFrame.to_excel()`
    Returns: None
    """
# ignore [engine] parameter if it was passed
if 'engine' in to_excel_kwargs:
to_excel_kwargs.pop('engine')
writer = pd.ExcelWriter(filename, engine='openpyxl')
# Python 2.x: define [FileNotFoundError] exception if it doesn't exist
try:
FileNotFoundError
except NameError:
FileNotFoundError = IOError
try:
# try to open an existing workbook
writer.book = load_workbook(filename)
# get the last row in the existing Excel sheet
# if it was not specified explicitly
if startrow is None and sheet_name in writer.book.sheetnames:
startrow = writer.book[sheet_name].max_row
# truncate sheet
if truncate_sheet and sheet_name in writer.book.sheetnames:
# index of [sheet_name] sheet
idx = writer.book.sheetnames.index(sheet_name)
# remove [sheet_name]
writer.book.remove(writer.book.worksheets[idx])
# create an empty sheet [sheet_name] using old index
writer.book.create_sheet(sheet_name, idx)
# copy existing sheets
writer.sheets = {ws.title:ws for ws in writer.book.worksheets}
except FileNotFoundError:
# file does not exist yet, we will create it
pass
if startrow is None:
startrow = 0
# write out the new sheet
df.to_excel(writer, sheet_name, startrow=startrow, **to_excel_kwargs)
ws = writer.book[sheet_name]
for column_cells in ws.columns:
length = max(map(lambda cell: len(str(cell.value)) if cell.value else 0, column_cells))
ws.column_dimensions[column_cells[0].column_letter].width = length+3
# save the workbook
writer.save()
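# Editor's sketch (made-up file and sheet names): appending a small frame to a
# workbook that may or may not exist yet; extra keyword arguments such as
# index=False are passed straight through to DataFrame.to_excel().
def _example_append():
    demo = pd.DataFrame({"Sortie": ["DEMO01"], "Result": ["OK"]})
    append_df_to_excel("debrief_example.xlsx", demo, sheet_name="Combined",
                       index=False)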
def updatefillins(filename):
try:
# try to open an existing workbook
writer = pd.ExcelWriter(filename, engine='openpyxl')
writer.book = load_workbook(filename)
benamerng = writer.book.defined_names['BEname']
cells = []
for title, coord in benamerng.destinations:
ws = writer.book[title]
ws[coord] = config.bename.upper()
for title, coord in writer.book.defined_names['BELat'].destinations:
ws = writer.book[title]
ws[coord] = str(config.belat)
for title, coord in writer.book.defined_names['BELong'].destinations:
ws = writer.book[title]
ws[coord] = str(config.belong)
if len(config.csname)>0:
for title, coord in writer.book.defined_names['cs'].destinations:
ws = writer.book[title]
ws[coord] = str(config.csname)
# copy existing sheets
writer.sheets = {ws.title:ws for ws in writer.book.worksheets}
except:
# file does not exist yet, we will create it
print('Error writing Fill In Values')
# save the workbook
writer.save()
def to_gpsnmea(df,filename):
df = df.rename(columns={"Time (UTC)": "TIME"})
df['LAT'] = df['LAT'].str.replace(" ", '')
df['LONG'] = df['LONG'].str.replace(" ", '')
filename = filename.replace('Debrief Card','GPS Trail').replace('xlsx','gps')
gpsfile = open(filename, 'a')
for index, row in df.iterrows():
mvar = float(row.THDG) - float(row.MHDG)
if mvar < 0:
mvarH = 'W'
else:
mvarH = 'E'
GS = f'{row.GS:05.1f}'
GTRK = f'{row.GTRK:05.1f}'
MHDG = f'{row.MHDG:05.1f}'
dtime = row.TIME.strftime('%H%M%S')
ddate = row.TIME.strftime('%d%m%y')
alt = str(round(float(row.ALT) * 0.3048))
        RMC = pynmea2.RMC('GP', 'RMC', (dtime, 'A', row.LAT[1:], row.LAT[0], row.LONG[1:], row.LONG[0], GS, GTRK, ddate, f'{mvar:05.1f}', mvarH))
GGA = pynmea2.GGA('GP', 'GGA', (dtime, row.LAT[1:], row.LAT[0], row.LONG[1:], row.LONG[0], '1', '', '', alt, 'M', '', '', '', '0'))
        VTG = pynmea2.VTG('GP', 'VTG', (GTRK, 'T', MHDG, 'M', f'{row.GS:06.2f}', 'N', '', 'K'))
gpsfile.write(str(RMC) + "\n")
gpsfile.write(str(GGA) + "\n")
gpsfile.write(str(VTG) + "\n")
gpsfile.close()
#os.system('cmd .\output\GPSBabel\gpsbabel -i nmea -f ' + filename + ' -x interpolate,time=10 -o nmea -F "testtrack.gps"')
#subprocess.Popen(r'explorer /select,'+ filename )
os.startfile(filename, 'open')
def jassm_report_match(debrief_filename, jassm_report):
jreport = pd.ExcelFile(jassm_report)
wpngroups = []
for sheet in jreport.sheet_names:
if 'JASSMGRP' in sheet:
#print(sheet)
df = jreport.parse(sheet, skiprows=5, index_col=None, na_values=['NA'])
df['wpngroup'] = str(sheet) + " " + 'MSN'+ df['Msn'].astype(str)
wpngroups.append(df)
if len(wpngroups)>0:
wpns = pd.read_excel(debrief_filename, sheet_name='Combined',index_col=None, na_values=['NA'])
wpns.astype({'TGT LAT':str,'TGT LONG':str,'TGT ELEV':str,'BULL':str}).dtypes
wpns['TGT LAT'] = wpns['TGT LAT'].astype(str)
wpns['TGT LONG'] = wpns['TGT LONG'].astype(str)
wpns['TGT ELEV'] = wpns['TGT ELEV'].astype(str)
wpns['BULL'] = wpns['BULL'].astype(str)
for i, row in wpns.iterrows():
for group in wpngroups:
for j, rows in group.iterrows():
if wpns.at[i,'TGT Name'] == group.at[j,'wpngroup']:
wpns.at[i, 'TGT Name'] = group.at[j,'Mission Name']
wpns.at[i, 'TGT LAT'] = group.at[j, 'Tgt Latitude']
wpns.at[i, 'TGT LONG'] = group.at[j,'Tgt Longitude']
wpns.at[i, 'TGT ELEV'] = str(group.at[j,'Tgt Elev (HAE)']) + "' HAE"
try:
wpns.at[i, 'BULL'] = bullcalculate(wpns.at[i, 'TGT LAT'], wpns.at[i, 'TGT LONG'])
except:
wpns.at[i, 'BULL'] = ''
wpns = wpns.fillna('')
wpns = wpns.replace('nan', '', regex=True)
return wpns
else:
return pd.DataFrame()
def jassm_report_match(debrief_filename='none', jassm_report='jrep.xlsm'):
if debrief_filename == 'none':
debrief_filename = QFileDialog.getOpenFileName(self, 'Open Debrief Card Excel File',
os.path.join(os.path.join(os.environ['USERPROFILE']), 'Desktop'),
"Excel File (*.xlsx)")
if jassm_report == 'none':
jassm_report = QFileDialog.getOpenFileName(self, 'Open JMPS JASSM Report Excel File',
os.path.join(os.path.join(os.environ['USERPROFILE']), 'Desktop'),
"Excel File (*.xlsx)")
jreport = | pd.ExcelFile(jassm_report) | pandas.ExcelFile |
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime, timedelta
import functools
import itertools
import numpy as np
import numpy.ma as ma
import numpy.ma.mrecords as mrecords
from numpy.random import randn
import pytest
from pandas.compat import (
PY3, PY36, OrderedDict, is_platform_little_endian, lmap, long, lrange,
lzip, range, zip)
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas.core.dtypes.common import is_integer_dtype
import pandas as pd
from pandas import (
Categorical, DataFrame, Index, MultiIndex, Series, Timedelta, Timestamp,
compat, date_range, isna)
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
MIXED_FLOAT_DTYPES = ['float16', 'float32', 'float64']
MIXED_INT_DTYPES = ['uint8', 'uint16', 'uint32', 'uint64', 'int8', 'int16',
'int32', 'int64']
class TestDataFrameConstructors(TestData):
def test_constructor(self):
df = DataFrame()
assert len(df.index) == 0
df = DataFrame(data={})
assert len(df.index) == 0
def test_constructor_mixed(self):
index, data = tm.getMixedTypeDict()
# TODO(wesm), incomplete test?
indexed_frame = DataFrame(data, index=index) # noqa
unindexed_frame = DataFrame(data) # noqa
assert self.mixed_frame['foo'].dtype == np.object_
def test_constructor_cast_failure(self):
foo = DataFrame({'a': ['a', 'b', 'c']}, dtype=np.float64)
assert foo['a'].dtype == object
# GH 3010, constructing with odd arrays
df = DataFrame(np.ones((4, 2)))
# this is ok
df['foo'] = np.ones((4, 2)).tolist()
# this is not ok
pytest.raises(ValueError, df.__setitem__, tuple(['test']),
np.ones((4, 2)))
# this is ok
df['foo2'] = np.ones((4, 2)).tolist()
def test_constructor_dtype_copy(self):
orig_df = DataFrame({
'col1': [1.],
'col2': [2.],
'col3': [3.]})
new_df = pd.DataFrame(orig_df, dtype=float, copy=True)
new_df['col1'] = 200.
assert orig_df['col1'][0] == 1.
def test_constructor_dtype_nocast_view(self):
df = DataFrame([[1, 2]])
should_be_view = DataFrame(df, dtype=df[0].dtype)
should_be_view[0][0] = 99
assert df.values[0, 0] == 99
should_be_view = DataFrame(df.values, dtype=df[0].dtype)
should_be_view[0][0] = 97
assert df.values[0, 0] == 97
def test_constructor_dtype_list_data(self):
df = DataFrame([[1, '2'],
[None, 'a']], dtype=object)
assert df.loc[1, 0] is None
assert df.loc[0, 1] == '2'
def test_constructor_list_frames(self):
# see gh-3243
result = DataFrame([DataFrame([])])
assert result.shape == (1, 0)
result = DataFrame([DataFrame(dict(A=lrange(5)))])
assert isinstance(result.iloc[0, 0], DataFrame)
def test_constructor_mixed_dtypes(self):
def _make_mixed_dtypes_df(typ, ad=None):
if typ == 'int':
dtypes = MIXED_INT_DTYPES
arrays = [np.array(np.random.rand(10), dtype=d)
for d in dtypes]
elif typ == 'float':
dtypes = MIXED_FLOAT_DTYPES
arrays = [np.array(np.random.randint(
10, size=10), dtype=d) for d in dtypes]
zipper = lzip(dtypes, arrays)
for d, a in zipper:
assert(a.dtype == d)
if ad is None:
ad = dict()
ad.update({d: a for d, a in zipper})
return DataFrame(ad)
def _check_mixed_dtypes(df, dtypes=None):
if dtypes is None:
dtypes = MIXED_FLOAT_DTYPES + MIXED_INT_DTYPES
for d in dtypes:
if d in df:
assert(df.dtypes[d] == d)
        # mixed floating and integer coexist in the same frame
df = _make_mixed_dtypes_df('float')
_check_mixed_dtypes(df)
# add lots of types
df = _make_mixed_dtypes_df('float', dict(A=1, B='foo', C='bar'))
_check_mixed_dtypes(df)
# GH 622
df = _make_mixed_dtypes_df('int')
_check_mixed_dtypes(df)
def test_constructor_complex_dtypes(self):
# GH10952
a = np.random.rand(10).astype(np.complex64)
b = np.random.rand(10).astype(np.complex128)
df = DataFrame({'a': a, 'b': b})
assert a.dtype == df.a.dtype
assert b.dtype == df.b.dtype
def test_constructor_dtype_str_na_values(self, string_dtype):
# https://github.com/pandas-dev/pandas/issues/21083
df = DataFrame({'A': ['x', None]}, dtype=string_dtype)
result = df.isna()
expected = DataFrame({"A": [False, True]})
tm.assert_frame_equal(result, expected)
assert df.iloc[1, 0] is None
df = DataFrame({'A': ['x', np.nan]}, dtype=string_dtype)
assert np.isnan(df.iloc[1, 0])
def test_constructor_rec(self):
rec = self.frame.to_records(index=False)
if PY3:
# unicode error under PY2
rec.dtype.names = list(rec.dtype.names)[::-1]
index = self.frame.index
df = DataFrame(rec)
tm.assert_index_equal(df.columns, pd.Index(rec.dtype.names))
df2 = DataFrame(rec, index=index)
tm.assert_index_equal(df2.columns, pd.Index(rec.dtype.names))
tm.assert_index_equal(df2.index, index)
rng = np.arange(len(rec))[::-1]
df3 = DataFrame(rec, index=rng, columns=['C', 'B'])
expected = DataFrame(rec, index=rng).reindex(columns=['C', 'B'])
tm.assert_frame_equal(df3, expected)
def test_constructor_bool(self):
df = DataFrame({0: np.ones(10, dtype=bool),
1: np.zeros(10, dtype=bool)})
assert df.values.dtype == np.bool_
def test_constructor_overflow_int64(self):
# see gh-14881
values = np.array([2 ** 64 - i for i in range(1, 10)],
dtype=np.uint64)
result = DataFrame({'a': values})
assert result['a'].dtype == np.uint64
# see gh-2355
data_scores = [(6311132704823138710, 273), (2685045978526272070, 23),
(8921811264899370420, 45),
(long(17019687244989530680), 270),
(long(9930107427299601010), 273)]
dtype = [('uid', 'u8'), ('score', 'u8')]
data = np.zeros((len(data_scores),), dtype=dtype)
data[:] = data_scores
df_crawls = DataFrame(data)
assert df_crawls['uid'].dtype == np.uint64
@pytest.mark.parametrize("values", [np.array([2**64], dtype=object),
np.array([2**65]), [2**64 + 1],
np.array([-2**63 - 4], dtype=object),
np.array([-2**64 - 1]), [-2**65 - 2]])
def test_constructor_int_overflow(self, values):
# see gh-18584
value = values[0]
result = DataFrame(values)
assert result[0].dtype == object
assert result[0][0] == value
def test_constructor_ordereddict(self):
import random
nitems = 100
nums = lrange(nitems)
random.shuffle(nums)
expected = ['A%d' % i for i in nums]
df = DataFrame(OrderedDict(zip(expected, [[0]] * nitems)))
assert expected == list(df.columns)
def test_constructor_dict(self):
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2})
# col2 is padded with NaN
assert len(self.ts1) == 30
assert len(self.ts2) == 25
tm.assert_series_equal(self.ts1, frame['col1'], check_names=False)
exp = pd.Series(np.concatenate([[np.nan] * 5, self.ts2.values]),
index=self.ts1.index, name='col2')
tm.assert_series_equal(exp, frame['col2'])
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2},
columns=['col2', 'col3', 'col4'])
assert len(frame) == len(self.ts2)
assert 'col1' not in frame
assert isna(frame['col3']).all()
# Corner cases
assert len(DataFrame({})) == 0
# mix dict and array, wrong size - no spec for which error should raise
# first
with pytest.raises(ValueError):
DataFrame({'A': {'a': 'a', 'b': 'b'}, 'B': ['a', 'b', 'c']})
# Length-one dict micro-optimization
frame = DataFrame({'A': {'1': 1, '2': 2}})
tm.assert_index_equal(frame.index, pd.Index(['1', '2']))
# empty dict plus index
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx)
assert frame.index is idx
# empty with index and columns
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx, columns=idx)
assert frame.index is idx
assert frame.columns is idx
assert len(frame._series) == 3
# with dict of empty list and Series
frame = DataFrame({'A': [], 'B': []}, columns=['A', 'B'])
tm.assert_index_equal(frame.index, Index([], dtype=np.int64))
# GH 14381
# Dict with None value
frame_none = DataFrame(dict(a=None), index=[0])
frame_none_list = DataFrame(dict(a=[None]), index=[0])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none.get_value(0, 'a') is None
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none_list.get_value(0, 'a') is None
tm.assert_frame_equal(frame_none, frame_none_list)
# GH10856
# dict with scalar values should raise error, even if columns passed
msg = 'If using all scalar values, you must pass an index'
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7})
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7}, columns=['a'])
@pytest.mark.parametrize("scalar", [2, np.nan, None, 'D'])
def test_constructor_invalid_items_unused(self, scalar):
# No error if invalid (scalar) value is in fact not used:
result = DataFrame({'a': scalar}, columns=['b'])
expected = DataFrame(columns=['b'])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [2, np.nan, None, float('nan')])
def test_constructor_dict_nan_key(self, value):
# GH 18455
cols = [1, value, 3]
idx = ['a', value]
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = DataFrame(data).sort_values(1).sort_values('a', axis=1)
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values('a', axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [np.nan, None, float('nan')])
def test_constructor_dict_nan_tuple_key(self, value):
# GH 18455
cols = Index([(11, 21), (value, 22), (13, value)])
idx = Index([('a', value), (value, 2)])
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = (DataFrame(data)
.sort_values((11, 21))
.sort_values(('a', value), axis=1))
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values(('a', value), axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.skipif(not PY36, reason='Insertion order for Python>=3.6')
def test_constructor_dict_order_insertion(self):
# GH19018
# initialization ordering: by insertion order if python>= 3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ba'))
tm.assert_frame_equal(frame, expected)
@pytest.mark.skipif(PY36, reason='order by value for Python<3.6')
def test_constructor_dict_order_by_values(self):
# GH19018
# initialization ordering: by value if python<3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ab'))
tm.assert_frame_equal(frame, expected)
def test_constructor_multi_index(self):
# GH 4078
# construction error with mi and all-nan frame
tuples = [(2, 3), (3, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
tuples = [(3, 3), (2, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
def test_constructor_error_msgs(self):
msg = "Empty data passed with indices specified."
# passing an empty array with columns specified.
with pytest.raises(ValueError, match=msg):
DataFrame(np.empty(0), columns=list('abc'))
msg = "Mixing dicts with non-Series may lead to ambiguous ordering."
# mix dict and array, wrong size
with pytest.raises(ValueError, match=msg):
DataFrame({'A': {'a': 'a', 'b': 'b'},
'B': ['a', 'b', 'c']})
# wrong size ndarray, GH 3105
msg = r"Shape of passed values is \(3, 4\), indices imply \(3, 3\)"
with pytest.raises(ValueError, match=msg):
DataFrame(np.arange(12).reshape((4, 3)),
columns=['foo', 'bar', 'baz'],
index=pd.date_range('2000-01-01', periods=3))
# higher dim raise exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(np.zeros((3, 3, 3)), columns=['A', 'B', 'C'], index=[1])
# wrong size axis labels
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(3, 1\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B', 'C'], index=[1])
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(2, 2\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B'], index=[1, 2])
msg = ("If using all scalar "
"values, you must pass "
"an index")
with pytest.raises(ValueError, match=msg):
DataFrame({'a': False, 'b': True})
def test_constructor_with_embedded_frames(self):
# embedded data frames
df1 = DataFrame({'a': [1, 2, 3], 'b': [3, 4, 5]})
df2 = DataFrame([df1, df1 + 10])
df2.dtypes
str(df2)
result = df2.loc[0, 0]
tm.assert_frame_equal(result, df1)
result = df2.loc[1, 0]
tm.assert_frame_equal(result, df1 + 10)
def test_constructor_subclass_dict(self):
# Test for passing dict subclass to constructor
data = {'col1': tm.TestSubDict((x, 10.0 * x) for x in range(10)),
'col2': tm.TestSubDict((x, 20.0 * x) for x in range(10))}
df = DataFrame(data)
refdf = DataFrame({col: dict(compat.iteritems(val))
for col, val in compat.iteritems(data)})
tm.assert_frame_equal(refdf, df)
data = tm.TestSubDict(compat.iteritems(data))
df = DataFrame(data)
tm.assert_frame_equal(refdf, df)
# try with defaultdict
from collections import defaultdict
data = {}
self.frame['B'][:10] = np.nan
for k, v in compat.iteritems(self.frame):
dct = defaultdict(dict)
dct.update(v.to_dict())
data[k] = dct
frame = DataFrame(data)
tm.assert_frame_equal(self.frame.sort_index(), frame)
def test_constructor_dict_block(self):
expected = np.array([[4., 3., 2., 1.]])
df = DataFrame({'d': [4.], 'c': [3.], 'b': [2.], 'a': [1.]},
columns=['d', 'c', 'b', 'a'])
tm.assert_numpy_array_equal(df.values, expected)
def test_constructor_dict_cast(self):
# cast float tests
test_data = {
'A': {'1': 1, '2': 2},
'B': {'1': '1', '2': '2', '3': '3'},
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 3
assert frame['B'].dtype == np.float64
assert frame['A'].dtype == np.float64
frame = DataFrame(test_data)
assert len(frame) == 3
assert frame['B'].dtype == np.object_
assert frame['A'].dtype == np.float64
# can't cast to float
test_data = {
'A': dict(zip(range(20), tm.makeStringIndex(20))),
'B': dict(zip(range(15), randn(15)))
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 20
assert frame['A'].dtype == np.object_
assert frame['B'].dtype == np.float64
def test_constructor_dict_dont_upcast(self):
d = {'Col1': {'Row1': 'A String', 'Row2': np.nan}}
df = DataFrame(d)
assert isinstance(df['Col1']['Row2'], float)
dm = DataFrame([[1, 2], ['a', 'b']], index=[1, 2], columns=[1, 2])
assert isinstance(dm[1][1], int)
def test_constructor_dict_of_tuples(self):
# GH #1491
data = {'a': (1, 2, 3), 'b': (4, 5, 6)}
result = DataFrame(data)
expected = DataFrame({k: list(v) for k, v in compat.iteritems(data)})
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_dict_multiindex(self):
def check(result, expected):
return tm.assert_frame_equal(result, expected, check_dtype=True,
check_index_type=True,
check_column_type=True,
check_names=True)
d = {('a', 'a'): {('i', 'i'): 0, ('i', 'j'): 1, ('j', 'i'): 2},
('b', 'a'): {('i', 'i'): 6, ('i', 'j'): 5, ('j', 'i'): 4},
('b', 'c'): {('i', 'i'): 7, ('i', 'j'): 8, ('j', 'i'): 9}}
_d = sorted(d.items())
df = DataFrame(d)
expected = DataFrame(
[x[1] for x in _d],
index=MultiIndex.from_tuples([x[0] for x in _d])).T
expected.index = MultiIndex.from_tuples(expected.index)
check(df, expected)
d['z'] = {'y': 123., ('i', 'i'): 111, ('i', 'j'): 111, ('j', 'i'): 111}
_d.insert(0, ('z', d['z']))
expected = DataFrame(
[x[1] for x in _d],
index=Index([x[0] for x in _d], tupleize_cols=False)).T
expected.index = Index(expected.index, tupleize_cols=False)
df = DataFrame(d)
df = df.reindex(columns=expected.columns, index=expected.index)
check(df, expected)
def test_constructor_dict_datetime64_index(self):
# GH 10160
dates_as_str = ['1984-02-19', '1988-11-06', '1989-12-03', '1990-03-15']
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(dates_as_str)}
data_datetime64 = create_data(np.datetime64)
data_datetime = create_data(lambda x: datetime.strptime(x, '%Y-%m-%d'))
data_Timestamp = create_data(Timestamp)
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timestamp(dt) for dt in dates_as_str])
result_datetime64 = DataFrame(data_datetime64)
result_datetime = DataFrame(data_datetime)
result_Timestamp = DataFrame(data_Timestamp)
tm.assert_frame_equal(result_datetime64, expected)
tm.assert_frame_equal(result_datetime, expected)
tm.assert_frame_equal(result_Timestamp, expected)
def test_constructor_dict_timedelta64_index(self):
# GH 10160
td_as_int = [1, 2, 3, 4]
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(td_as_int)}
data_timedelta64 = create_data(lambda x: np.timedelta64(x, 'D'))
data_timedelta = create_data(lambda x: timedelta(days=x))
data_Timedelta = create_data(lambda x: Timedelta(x, 'D'))
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timedelta(td, 'D') for td in td_as_int])
result_timedelta64 = DataFrame(data_timedelta64)
result_timedelta = DataFrame(data_timedelta)
result_Timedelta = DataFrame(data_Timedelta)
tm.assert_frame_equal(result_timedelta64, expected)
tm.assert_frame_equal(result_timedelta, expected)
tm.assert_frame_equal(result_Timedelta, expected)
def test_constructor_period(self):
# PeriodIndex
a = pd.PeriodIndex(['2012-01', 'NaT', '2012-04'], freq='M')
b = pd.PeriodIndex(['2012-02-01', '2012-03-01', 'NaT'], freq='D')
df = pd.DataFrame({'a': a, 'b': b})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
# list of periods
df = pd.DataFrame({'a': a.astype(object).tolist(),
'b': b.astype(object).tolist()})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
def test_nested_dict_frame_constructor(self):
rng = pd.period_range('1/1/2000', periods=5)
df = DataFrame(randn(10, 5), columns=rng)
data = {}
for col in df.columns:
for row in df.index:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
data.setdefault(col, {})[row] = df.get_value(row, col)
result = DataFrame(data, columns=rng)
tm.assert_frame_equal(result, df)
data = {}
for col in df.columns:
for row in df.index:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
data.setdefault(row, {})[col] = df.get_value(row, col)
result = DataFrame(data, index=rng).T
tm.assert_frame_equal(result, df)
def _check_basic_constructor(self, empty):
# mat: 2d matrix with shape (3, 2) to input. empty - makes sized
# objects
mat = empty((2, 3), dtype=float)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
# 1-D input
frame = DataFrame(empty((3,)), columns=['A'], index=[1, 2, 3])
assert len(frame.index) == 3
assert len(frame.columns) == 1
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# wrong size axis labels
msg = r'Shape of passed values is \(3, 2\), indices imply \(3, 1\)'
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=['A', 'B', 'C'], index=[1])
msg = r'Shape of passed values is \(3, 2\), indices imply \(2, 2\)'
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=['A', 'B'], index=[1, 2])
# higher dim raise exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(empty((3, 3, 3)), columns=['A', 'B', 'C'],
index=[1])
# automatic labeling
frame = DataFrame(mat)
tm.assert_index_equal(frame.index, pd.Index(lrange(2)))
tm.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, index=[1, 2])
tm.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, columns=['A', 'B', 'C'])
tm.assert_index_equal(frame.index, pd.Index(lrange(2)))
# 0-length axis
frame = DataFrame(empty((0, 3)))
assert len(frame.index) == 0
frame = DataFrame(empty((3, 0)))
assert len(frame.columns) == 0
def test_constructor_ndarray(self):
self._check_basic_constructor(np.ones)
frame = DataFrame(['foo', 'bar'], index=[0, 1], columns=['A'])
assert len(frame) == 2
def test_constructor_maskedarray(self):
self._check_basic_constructor(ma.masked_all)
# Check non-masked values
mat = ma.masked_all((2, 3), dtype=float)
mat[0, 0] = 1.0
mat[1, 2] = 2.0
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert 1.0 == frame['A'][1]
assert 2.0 == frame['C'][2]
# what is this even checking??
mat = ma.masked_all((2, 3), dtype=float)
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert np.all(~np.asarray(frame == frame))
def test_constructor_maskedarray_nonfloat(self):
# masked int promoted to float
mat = ma.masked_all((2, 3), dtype=int)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.float64)
assert frame.values.dtype == np.float64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert 1 == frame['A'][1]
assert 2 == frame['C'][2]
# masked np.datetime64 stays (use NaT as null)
mat = ma.masked_all((2, 3), dtype='M8[ns]')
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert isna(frame).values.all()
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert 1 == frame['A'].view('i8')[1]
assert 2 == frame['C'].view('i8')[2]
# masked bool promoted to object
mat = ma.masked_all((2, 3), dtype=bool)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=object)
assert frame.values.dtype == object
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = True
mat2[1, 2] = False
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert frame['A'][1] is True
assert frame['C'][2] is False
def test_constructor_mrecarray(self):
# Ensure mrecarray produces frame identical to dict of masked arrays
# from GH3479
assert_fr_equal = functools.partial(tm.assert_frame_equal,
check_index_type=True,
check_column_type=True,
check_frame_type=True)
arrays = [
('float', np.array([1.5, 2.0])),
('int', np.array([1, 2])),
('str', np.array(['abc', 'def'])),
]
for name, arr in arrays[:]:
arrays.append(('masked1_' + name,
np.ma.masked_array(arr, mask=[False, True])))
arrays.append(('masked_all', np.ma.masked_all((2,))))
arrays.append(('masked_none',
np.ma.masked_array([1.0, 2.5], mask=False)))
# call assert_frame_equal for all selections of 3 arrays
for comb in itertools.combinations(arrays, 3):
names, data = zip(*comb)
mrecs = mrecords.fromarrays(data, names=names)
# fill the comb
comb = {k: (v.filled() if hasattr(v, 'filled') else v)
for k, v in comb}
expected = DataFrame(comb, columns=names)
result = DataFrame(mrecs)
assert_fr_equal(result, expected)
# specify columns
expected = DataFrame(comb, columns=names[::-1])
result = DataFrame(mrecs, columns=names[::-1])
assert_fr_equal(result, expected)
# specify index
expected = DataFrame(comb, columns=names, index=[1, 2])
result = DataFrame(mrecs, index=[1, 2])
assert_fr_equal(result, expected)
def test_constructor_corner_shape(self):
df = DataFrame(index=[])
assert df.values.shape == (0, 0)
@pytest.mark.parametrize("data, index, columns, dtype, expected", [
(None, lrange(10), ['a', 'b'], object, np.object_),
(None, None, ['a', 'b'], 'int64', np.dtype('int64')),
(None, lrange(10), ['a', 'b'], int, np.dtype('float64')),
({}, None, ['foo', 'bar'], None, np.object_),
({'b': 1}, lrange(10), list('abc'), int, np.dtype('float64'))
])
def test_constructor_dtype(self, data, index, columns, dtype, expected):
df = DataFrame(data, index, columns, dtype)
assert df.values.dtype == expected
def test_constructor_scalar_inference(self):
data = {'int': 1, 'bool': True,
'float': 3., 'complex': 4j, 'object': 'foo'}
df = DataFrame(data, index=np.arange(10))
assert df['int'].dtype == np.int64
assert df['bool'].dtype == np.bool_
assert df['float'].dtype == np.float64
assert df['complex'].dtype == np.complex128
assert df['object'].dtype == np.object_
def test_constructor_arrays_and_scalars(self):
df = DataFrame({'a': randn(10), 'b': True})
exp = DataFrame({'a': df['a'].values, 'b': [True] * 10})
tm.assert_frame_equal(df, exp)
with pytest.raises(ValueError, match='must pass an index'):
DataFrame({'a': False, 'b': True})
def test_constructor_DataFrame(self):
df = DataFrame(self.frame)
tm.assert_frame_equal(df, self.frame)
df_casted = DataFrame(self.frame, dtype=np.int64)
assert df_casted.values.dtype == np.int64
def test_constructor_more(self):
# used to be in test_matrix.py
arr = randn(10)
dm = DataFrame(arr, columns=['A'], index=np.arange(10))
assert dm.values.ndim == 2
arr = randn(0)
dm = DataFrame(arr)
assert dm.values.ndim == 2
assert dm.values.ndim == 2
# no data specified
dm = DataFrame(columns=['A', 'B'], index=np.arange(10))
assert dm.values.shape == (10, 2)
dm = DataFrame(columns=['A', 'B'])
assert dm.values.shape == (0, 2)
dm = DataFrame(index=np.arange(10))
assert dm.values.shape == (10, 0)
# can't cast
mat = np.array(['foo', 'bar'], dtype=object).reshape(2, 1)
with pytest.raises(ValueError, match='cast'):
DataFrame(mat, index=[0, 1], columns=[0], dtype=float)
dm = DataFrame(DataFrame(self.frame._series))
tm.assert_frame_equal(dm, self.frame)
# int cast
dm = DataFrame({'A': np.ones(10, dtype=int),
'B': np.ones(10, dtype=np.float64)},
index=np.arange(10))
assert len(dm.columns) == 2
assert dm.values.dtype == np.float64
def test_constructor_empty_list(self):
df = DataFrame([], index=[])
expected = DataFrame(index=[])
tm.assert_frame_equal(df, expected)
# GH 9939
df = DataFrame([], columns=['A', 'B'])
expected = DataFrame({}, columns=['A', 'B'])
tm.assert_frame_equal(df, expected)
# Empty generator: list(empty_gen()) == []
def empty_gen():
return
yield
df = DataFrame(empty_gen(), columns=['A', 'B'])
tm.assert_frame_equal(df, expected)
def test_constructor_list_of_lists(self):
# GH #484
df = DataFrame(data=[[1, 'a'], [2, 'b']], columns=["num", "str"])
assert is_integer_dtype(df['num'])
assert df['str'].dtype == np.object_
# GH 4851
# list of 0-dim ndarrays
expected = DataFrame({0: np.arange(10)})
data = [np.array(x) for x in range(10)]
result = DataFrame(data)
tm.assert_frame_equal(result, expected)
def test_constructor_sequence_like(self):
# GH 3783
        # collections.Sequence like
class DummyContainer(compat.Sequence):
def __init__(self, lst):
self._lst = lst
def __getitem__(self, n):
return self._lst.__getitem__(n)
            def __len__(self):
return self._lst.__len__()
lst_containers = [DummyContainer([1, 'a']), DummyContainer([2, 'b'])]
columns = ["num", "str"]
result = DataFrame(lst_containers, columns=columns)
expected = DataFrame([[1, 'a'], [2, 'b']], columns=columns)
tm.assert_frame_equal(result, expected, check_dtype=False)
# GH 4297
# support Array
import array
result = DataFrame({'A': array.array('i', range(10))})
expected = DataFrame({'A': list(range(10))})
tm.assert_frame_equal(result, expected, check_dtype=False)
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([array.array('i', range(10)),
array.array('i', range(10))])
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_iterable(self):
# GH 21987
class Iter():
def __iter__(self):
for i in range(10):
yield [1, 2, 3]
expected = DataFrame([[1, 2, 3]] * 10)
result = DataFrame(Iter())
tm.assert_frame_equal(result, expected)
def test_constructor_iterator(self):
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([range(10), range(10)])
tm.assert_frame_equal(result, expected)
def test_constructor_generator(self):
# related #2305
gen1 = (i for i in range(10))
gen2 = (i for i in range(10))
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([gen1, gen2])
tm.assert_frame_equal(result, expected)
gen = ([i, 'a'] for i in range(10))
result = DataFrame(gen)
expected = DataFrame({0: | range(10) | pandas.compat.range |
"""
Glue code for conducting experiment 1
"""
import os
# os.environ["TF_MIN_GPU_MULTIPROCESSOR_COUNT"] = "4"
# No verbose logging from TF
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "1"
import time
import random
import argparse
import signal
import numpy as np
from pathlib import Path
import pandas
from ensemble_experiments.datagen2d import CLASS_A, CLASS_B
def entropy_variance(stats: pandas.DataFrame, L: int):
"""
Entropy Measure for Variance
Parameters:
stats: DataFrame with 'correct_count' and 'incorrect_count' columns detailing the number of
networks that correctly and incorrectly classified each validation point respectively
L: The number of networks in the ensemble
"""
if L == 1:
return 0
N = len(stats)
minimum_counts = stats.loc[:, ('correct_count', 'incorrect_count')].min(axis=1)
coeff = 1/(L - np.ceil(L / 2))
mult = coeff * minimum_counts
summation = mult.sum()
E = summation / N
return E
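# Editor's sketch of the measure on a toy ensemble: three validation points
# scored by L=5 networks. Only the middle point, where the networks disagree,
# contributes to E: (1/(5-3)) * 2 / 3 = 1/3.
def _example_entropy_variance():
    stats = pandas.DataFrame({
        "correct_count": [5, 3, 0],
        "incorrect_count": [0, 2, 5],
    })
    return entropy_variance(stats, L=5)  # -> 0.333...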
def train(train_df, test_df, save_dir, epochs, verbose, net_number, learn_rate, hidden_nodes, patience):
save_net = save_dir / "net.h5"
save_overtrained_net = save_dir / "overtrained_net.h5"
import keras
from keras.callbacks import EarlyStopping
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import SGD
start_time = time.time()
if save_net.exists() and save_overtrained_net.exists():
print(f"Models already exist for {save_net}")
else:
print(f"Training for {save_net}")
train_classes = train_df["class"].values
test_classes = test_df["class"].values
train_data = train_df.as_matrix(columns=("x", "y"))
test_data = test_df.as_matrix(columns=("x", "y"))
stopper = EarlyStopping(
monitor="val_acc",
patience=patience,
verbose=verbose,
)
model = Sequential([
Dense(hidden_nodes, input_shape=(2,), activation="sigmoid"),
Dense(1, activation="sigmoid")
])
model.compile(
loss="binary_crossentropy",
optimizer=SGD(lr=learn_rate),
metrics=["accuracy"]
)
hist = model.fit(
train_data,
train_classes,
verbose=verbose,
epochs=epochs,
validation_data=(test_data, test_classes),
callbacks=[stopper]
)
model.save(save_net)
op_epochs = len(hist.epoch)
ot_epochs = op_epochs * 10
print(f"Trained to {op_epochs} epochs as determined by early exit, saved to {save_net}\n"
f"Beginning overtrain to {ot_epochs} epochs")
train_time = time.time()
print(f">> Train Time: {train_time-start_time:.2f} seconds")
model.fit(
train_data,
train_classes,
verbose=verbose,
epochs=ot_epochs,
initial_epoch=op_epochs,
validation_data=(test_data, test_classes)
)
model.save(save_overtrained_net)
print(f"Overtrained to {ot_epochs} epochs, saved to {save_overtrained_net}")
end_time = time.time()
print(f">> Overtrain Time: {end_time-train_time:.2f} seconds")
print(f">>>> Total Time: {end_time-start_time:.2f} seconds")
# Avoid Memory Leak https://github.com/keras-team/keras/issues/2102
keras.backend.clear_session()
return {
"id": net_number,
"dir": save_dir,
"net": save_net,
"ot_net": save_overtrained_net
}
def main(args):
print("Running Experiment")
import ensemble_experiments.datagen2d as dg
print(f"BEGIN RUN FOR ERROR RATE {args.error_rate}%")
ratedir = args.save_dir / f"error-{args.error_rate}"
ratedir.mkdir(exist_ok=True, parents=True)
data_csv = ratedir / "data.csv"
if not data_csv.exists():
data = dg.generate_data(args.data_size, args.error_rate)
data.to_csv(data_csv)
else:
data = pandas.read_csv(data_csv)
val_data_csv = ratedir / "validation.csv"
if not val_data_csv.exists():
val_data = dg.generate_data(args.val_data_size, args.error_rate)
val_data.to_csv(val_data_csv)
else:
val_data = | pandas.read_csv(val_data_csv) | pandas.read_csv |
# Multiple Linear Regression
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
from sklearn.datasets import load_boston
dataset = load_boston()
df = | pd.DataFrame(dataset.data) | pandas.DataFrame |
import pandas as pd
from unittest import TestCase, main
from metapool.amplipool import assign_emp_index, _load_emp_indices
class AmplipoolTests(TestCase):
def setUp(self):
self.plate_metadata = pd.DataFrame([
{
'Plate Position': '1',
'Primer Plate #': '1',
'Plating': 'SF',
'Extraction Kit Lot': '166032128',
'Extraction Robot': 'Carmen_HOWE_KF3',
'TM1000 8 Tool': '109379Z',
'Primer Date': '2021-08-17',
'MasterMix Lot': '978215',
'Water Lot': 'RNBJ0628',
'Processing Robot': 'Echo550',
'Sample Plate': 'THDMI_UK_Plate_2',
'Project_Name': 'THDMI UK',
'Original Name': ''
},
{
'Plate Position': '2',
'Primer Plate #': '2',
'Plating': 'AS',
'Extraction Kit Lot': '166032128',
'Extraction Robot': 'Carmen_HOWE_KF4',
'TM1000 8 Tool': '109379Z',
'Primer Date': '2021-08-17',
'MasterMix Lot': '978215',
'Water Lot': 'RNBJ0628',
'Processing Robot': 'Echo550',
'Sample Plate': 'THDMI_UK_Plate_3',
'Project_Name': 'THDMI UK',
'Original Name': ''
},
{
'Plate Position': '3',
'Primer Plate #': '3',
'Plating': 'MB_SF',
'Extraction Kit Lot': '166032128',
'Extraction Robot': 'Carmen_HOWE_KF3',
'TM1000 8 Tool': '109379Z',
'Primer Date': '2021-08-17',
'MasterMix Lot': '978215',
'Water Lot': 'RNBJ0628',
'Processing Robot': 'Echo550',
'Sample Plate': 'THDMI_UK_Plate_4',
'Project_Name': 'THDMI UK',
'Original Name': ''
},
{
'Plate Position': '4',
'Primer Plate #': '4',
'Plating': 'AS',
'Extraction Kit Lot': '166032128',
'Extraction Robot': 'Carmen_HOWE_KF4',
'TM1000 8 Tool': '109379Z',
'Primer Date': '2021-08-17',
'MasterMix Lot': '978215',
'Water Lot': 'RNBJ0628',
'Processing Robot': 'Echo550',
'Sample Plate': 'THDMI_US_Plate_6',
'Project_Name': 'THDMI US',
'Original Name': ''
}
])
columns = ['Sample', 'Row', 'Col', 'Blank', 'Project Plate',
'Project Name', 'Compressed Plate Name', 'Well']
data = [
['X00180471', 'A', 1, False, 'THDMI_10317_PUK2', 'THDMI_10317',
'THDMI_10317_UK2-US6', 'A1'],
['X00180199', 'C', 1, False, 'THDMI_10317_PUK2', 'THDMI_10317',
'THDMI_10317_UK2-US6', 'C1'],
['X00179789', 'E', 1, False, 'THDMI_10317_PUK2', 'THDMI_10317',
'THDMI_10317_UK2-US6', 'E1'],
['X00180201', 'G', 1, False, 'THDMI_10317_PUK2', 'THDMI_10317',
'THDMI_10317_UK2-US6', 'G1'],
['X00180464', 'I', 1, False, 'THDMI_10317_PUK2', 'THDMI_10317',
'THDMI_10317_UK2-US6', 'I1'],
['X00179796', 'K', 1, False, 'THDMI_10317_PUK2', 'THDMI_10317',
'THDMI_10317_UK2-US6', 'K1']]
self.df = pd.DataFrame(columns=columns, data=data)
self.seqtype = '16S'
def test_assign_emp_index_position_one(self):
obs = assign_emp_index(self.df, self.plate_metadata, self.seqtype)
data = [
['X00180471', 'A', 1, False, 'THDMI_10317_PUK2', 'THDMI_10317',
'THDMI_10317_UK2-US6', 'A1', '1', '1', 'SF', '166032128',
'Carmen_HOWE_KF3', '109379Z', '2021-08-17', '978215', 'RNBJ0628',
'Echo550', 'THDMI_UK_Plate_2', 'THDMI UK', '', '1', 'A1',
'515rcbc0', 'AATGATACGGCGACCACCGAGATCTACACGCT', 'AGCCTTCGTCGC',
'TATGGTAATT', 'GT', 'GTGYCAGCMGCCGCGGTAA',
('AATGATACGGCGACCACCGAGATCTACACGCTAG'
'CCTTCGTCGCTATGGTAATTGTGTGYCAGCMGCCGCGGTAA')],
['X00180199', 'C', 1, False, 'THDMI_10317_PUK2', 'THDMI_10317',
'THDMI_10317_UK2-US6', 'C1', '1', '1', 'SF', '166032128',
'Carmen_HOWE_KF3', '109379Z', '2021-08-17', '978215', 'RNBJ0628',
'Echo550', 'THDMI_UK_Plate_2', 'THDMI UK', '', '1', 'B1',
'515rcbc12', 'AATGATACGGCGACCACCGAGATCTACACGCT', 'CGTATAAATGCG',
'TATGGTAATT', 'GT', 'GTGYCAGCMGCCGCGGTAA',
('AATGATACGGCGACCACCGAGATCTACACGCTCG'
'TATAAATGCGTATGGTAATTGTGTGYCAGCMGCCGCGGTAA')],
['X00179789', 'E', 1, False, 'THDMI_10317_PUK2', 'THDMI_10317',
'THDMI_10317_UK2-US6', 'E1', '1', '1', 'SF', '166032128',
'Carmen_HOWE_KF3', '109379Z', '2021-08-17', '978215', 'RNBJ0628',
'Echo550', 'THDMI_UK_Plate_2', 'THDMI UK', '', '1', 'C1',
'515rcbc24', 'AATGATACGGCGACCACCGAGATCTACACGCT', 'TGACTAATGGCC',
'TATGGTAATT', 'GT', 'GTGYCAGCMGCCGCGGTAA',
('AATGATACGGCGACCACCGAGATCTACACGCTTGAC'
'TAATGGCCTATGGTAATTGTGTGYCAGCMGCCGCGGTAA')],
['X00180201', 'G', 1, False, 'THDMI_10317_PUK2', 'THDMI_10317',
'THDMI_10317_UK2-US6', 'G1', '1', '1', 'SF', '166032128',
'Carmen_HOWE_KF3', '109379Z', '2021-08-17', '978215', 'RNBJ0628',
'Echo550', 'THDMI_UK_Plate_2', 'THDMI UK', '', '1', 'D1',
'515rcbc36', 'AATGATACGGCGACCACCGAGATCTACACGCT', 'GTGGAGTCTCAT',
'TATGGTAATT', 'GT', 'GTGYCAGCMGCCGCGGTAA',
('AATGATACGGCGACCACCGAGATCTACACGC'
'TGTGGAGTCTCATTATGGTAATTGTGTGYCAGCMGCCGCGGTAA')],
['X00180464', 'I', 1, False, 'THDMI_10317_PUK2', 'THDMI_10317',
'THDMI_10317_UK2-US6', 'I1', '1', '1', 'SF', '166032128',
'Carmen_HOWE_KF3', '109379Z', '2021-08-17', '978215', 'RNBJ0628',
'Echo550', 'THDMI_UK_Plate_2', 'THDMI UK', '', '1', 'E1',
'515rcbc48', 'AATGATACGGCGACCACCGAGATCTACACGCT', 'TGATGTGCTAAG',
'TATGGTAATT', 'GT', 'GTGYCAGCMGCCGCGGTAA',
('AATGATACGGCGACCACCGAGATCTACACGCTTGATGTG'
'CTAAGTATGGTAATTGTGTGYCAGCMGCCGCGGTAA')],
['X00179796', 'K', 1, False, 'THDMI_10317_PUK2', 'THDMI_10317',
'THDMI_10317_UK2-US6', 'K1', '1', '1', 'SF', '166032128',
'Carmen_HOWE_KF3', '109379Z', '2021-08-17', '978215', 'RNBJ0628',
'Echo550', 'THDMI_UK_Plate_2', 'THDMI UK', '', '1', 'F1',
'515rcbc60', 'AATGATACGGCGACCACCGAGATCTACACGCT', 'TGTGCACGCCAT',
'TATGGTAATT', 'GT', 'GTGYCAGCMGCCGCGGTAA',
('AATGATACGGCGACCACCGAG'
'ATCTACACGCTTGTGCACGCCATTATGGTAATTGTGTGYCAGCMGCCGCGGTAA')]
]
exp = pd.DataFrame(
columns=['Sample', 'Row', 'Col', 'Blank', 'Project Plate',
'Project Name', 'Compressed Plate Name', 'Well',
'Plate Position', 'Primer Plate #', 'Plating',
'Extraction Kit Lot', 'Extraction Robot', 'TM1000 8 Tool',
'Primer Date', 'MasterMix Lot', 'Water Lot',
'Processing Robot', 'Sample Plate', 'Project_Name',
'Original Name', 'Plate', 'EMP Primer Plate Well', 'Name',
"Illumina 5prime Adapter", 'Golay Barcode',
'Forward Primer Pad', 'Forward Primer Linker',
'515FB Forward Primer (Parada)', 'Primer For PCR'],
data=data
)
pd.testing.assert_frame_equal(obs, exp)
def test_assign_emp_index_multiple_positions(self):
# change some of the well ids and their primer plates to spot check
# that correct barcodes are retrieved from the EMP indices file
# position 1 gets primer plate 5
self.plate_metadata.loc[0, 'Primer Plate #'] = '5'
self.df.loc[0, 'Row'] = 'A'
self.df.loc[0, 'Col'] = '1'
self.df.loc[0, 'Well'] = 'A1'
# position 2 gets primer plate 6
self.plate_metadata.loc[1, 'Primer Plate #'] = '6'
self.df.loc[1, 'Row'] = 'A'
self.df.loc[1, 'Col'] = '4'
self.df.loc[1, 'Well'] = 'A4'
# position 3 gets primer plate 9
self.plate_metadata.loc[2, 'Primer Plate #'] = '9'
self.df.loc[2, 'Row'] = 'D'
self.df.loc[2, 'Col'] = '7'
self.df.loc[2, 'Well'] = 'D7'
self.df.loc[3, 'Row'] = 'F'
self.df.loc[3, 'Col'] = '9'
self.df.loc[3, 'Well'] = 'F9'
# position 4 gets primer plate 10
self.plate_metadata.loc[3, 'Primer Plate #'] = '10'
self.df.loc[4, 'Row'] = 'B'
self.df.loc[4, 'Col'] = '6'
self.df.loc[4, 'Well'] = 'B6'
self.df.loc[5, 'Row'] = 'F'
self.df.loc[5, 'Col'] = '10'
self.df.loc[5, 'Well'] = 'F10'
obs1 = assign_emp_index(self.df, self.plate_metadata, '16S')
obs2 = assign_emp_index(self.df, self.plate_metadata, '18S')
obs3 = assign_emp_index(self.df, self.plate_metadata, 'ITS')
data1 = [
['X00180471', 'A', '1', False, 'THDMI_10317_PUK2', 'THDMI_10317',
'THDMI_10317_UK2-US6', 'A1', '1', '5', 'SF', '166032128',
'Carmen_HOWE_KF3', '109379Z', '2021-08-17', '978215', 'RNBJ0628',
'Echo550', 'THDMI_UK_Plate_2', 'THDMI UK', '', '5', 'A1',
'515rcbc384', 'AATGATACGGCGACCACCGAGATCTACACGCT', 'ATGTTAGGGAAT',
'TATGGTAATT', 'GT', 'GTGYCAGCMGCCGCGGTAA',
('AATGATACGGCGACCACCGAGATCTACACGCTATGTTAGG'
'GAATTATGGTAATTGTGTGYCAGCMGCCGCGGTAA')],
['X00180199', 'A', '4', False, 'THDMI_10317_PUK2', 'THDMI_10317',
'THDMI_10317_UK2-US6', 'A4', '2', '6', 'AS', '166032128',
'Carmen_HOWE_KF4', '109379Z', '2021-08-17', '978215', 'RNBJ0628',
'Echo550', 'THDMI_UK_Plate_3', 'THDMI UK', '', '6', 'A2',
'515rcbc481', 'AATGATACGGCGACCACCGAGATCTACACGCT', 'CTACCGATTGCG',
'TATGGTAATT', 'GT', 'GTGYCAGCMGCCGCGGTAA',
('AATGATACGGCGACCACCGAGATCTACACGCTCTACCGAT'
'TGCGTATGGTAATTGTGTGYCAGCMGCCGCGGTAA')],
['X00179789', 'D', '7', False, 'THDMI_10317_PUK2', 'THDMI_10317',
'THDMI_10317_UK2-US6', 'D7', '3', '9', 'MB_SF', '166032128',
'Carmen_HOWE_KF3', '109379Z', '2021-08-17', '978215', 'RNBJ0628',
'Echo550', 'THDMI_UK_Plate_4', 'THDMI UK', '', '9', 'B4',
'515rcbc783', 'AATGATACGGCGACCACCGAGATCTACACGCT', 'GGTTACGGTTAC',
'TATGGTAATT', 'GT', 'GTGYCAGCMGCCGCGGTAA',
('AATGATACGGCGACCACCGAGATCTACACGCTGGTTACGG'
'TTACTATGGTAATTGTGTGYCAGCMGCCGCGGTAA')],
['X00180201', 'F', '9', False, 'THDMI_10317_PUK2', 'THDMI_10317',
'THDMI_10317_UK2-US6', 'F9', '3', '9', 'MB_SF', '166032128',
'Carmen_HOWE_KF3', '109379Z', '2021-08-17', '978215', 'RNBJ0628',
'Echo550', 'THDMI_UK_Plate_4', 'THDMI UK', '', '9', 'C5',
'515rcbc796', 'AATGATACGGCGACCACCGAGATCTACACGCT', 'GATCTGCGATCC',
'TATGGTAATT', 'GT', 'GTGYCAGCMGCCGCGGTAA',
('AATGATACGGCGACCACCGAGATCTACACGCTGATCTGCG'
'ATCCTATGGTAATTGTGTGYCAGCMGCCGCGGTAA')],
['X00180464', 'B', '6', False, 'THDMI_10317_PUK2', 'THDMI_10317',
'THDMI_10317_UK2-US6', 'B6', '4', '10', 'AS', '166032128',
'Carmen_HOWE_KF4', '109379Z', '2021-08-17', '978215', 'RNBJ0628',
'Echo550', 'THDMI_US_Plate_6', 'THDMI US', '', '10', 'A3',
'515rcbc866', 'AATGATACGGCGACCACCGAGATCTACACGCT', 'TTGACGACATCG',
'TATGGTAATT', 'GT', 'GTGYCAGCMGCCGCGGTAA',
('AATGATACGGCGACCACCGAGATCTACACGCTTTGACGAC'
'ATCGTATGGTAATTGTGTGYCAGCMGCCGCGGTAA')],
['X00179796', 'F', '10', False, 'THDMI_10317_PUK2', 'THDMI_10317',
'THDMI_10317_UK2-US6', 'F10', '4', '10', 'AS', '166032128',
'Carmen_HOWE_KF4', '109379Z', '2021-08-17', '978215', 'RNBJ0628',
'Echo550', 'THDMI_US_Plate_6', 'THDMI US', '', '10', 'C5',
'515rcbc892', 'AATGATACGGCGACCACCGAGATCTACACGCT', 'TAGAGGCGTAGG',
'TATGGTAATT', 'GT', 'GTGYCAGCMGCCGCGGTAA',
('AATGATACGGCGACCACCGAGATCTACACGCTTAGAGGCG'
'TAGGTATGGTAATTGTGTGYCAGCMGCCGCGGTAA')]
]
data2 = [
['X00180471', 'A', '1', False, 'THDMI_10317_PUK2', 'THDMI_10317',
'THDMI_10317_UK2-US6', 'A1', '1', '5', 'SF', '166032128',
'Carmen_HOWE_KF3', '109379Z', '2021-08-17', '978215', 'RNBJ0628',
'Echo550', 'THDMI_UK_Plate_2', 'THDMI UK', '', '5', 'A1',
'EukBr_Hiseq_0411', 'CAAGCAGAAGACGGCATACGAGAT', 'AACAAACTGCCA',
'AGTCAGTCAG', 'CA', 'TGATCCTTCTGCAGGTTCACCTAC',
('CAAGCAGAAGACGGCATACGAGATAACAAACTGCCAAGTCAG'
'TCAGCATGATCCTTCTGCAGGTTCACCTAC')],
['X00180199', 'A', '4', False, 'THDMI_10317_PUK2', 'THDMI_10317',
'THDMI_10317_UK2-US6', 'A4', '2', '6', 'AS', '166032128',
'Carmen_HOWE_KF4', '109379Z', '2021-08-17', '978215', 'RNBJ0628',
'Echo550', 'THDMI_UK_Plate_3', 'THDMI UK', '', '6', 'A2',
'EukBr_Hiseq_0508', 'CAAGCAGAAGACGGCATACGAGAT', 'GGAGAGATCACG',
'AGTCAGTCAG', 'CA', 'TGATCCTTCTGCAGGTTCACCTAC',
('CAAGCAGAAGACGGCATACGAGATGGAGAGATCACGAGTCAG'
'TCAGCATGATCCTTCTGCAGGTTCACCTAC')],
['X00179789', 'D', '7', False, 'THDMI_10317_PUK2', 'THDMI_10317',
'THDMI_10317_UK2-US6', 'D7', '3', '9', 'MB_SF', '166032128',
'Carmen_HOWE_KF3', '109379Z', '2021-08-17', '978215', 'RNBJ0628',
'Echo550', 'THDMI_UK_Plate_4', 'THDMI UK', '', '9', 'B4',
'EukBr_Hiseq_0810', 'CAAGCAGAAGACGGCATACGAGAT', 'CATTTGACGACG',
'AGTCAGTCAG', 'CA', 'TGATCCTTCTGCAGGTTCACCTAC',
('CAAGCAGAAGACGGCATACGAGATCATTTGACGACGAGTCAG'
'TCAGCATGATCCTTCTGCAGGTTCACCTAC')],
['X00180201', 'F', '9', False, 'THDMI_10317_PUK2', 'THDMI_10317',
'THDMI_10317_UK2-US6', 'F9', '3', '9', 'MB_SF', '166032128',
'Carmen_HOWE_KF3', '109379Z', '2021-08-17', '978215', 'RNBJ0628',
'Echo550', 'THDMI_UK_Plate_4', 'THDMI UK', '', '9', 'C5',
'EukBr_Hiseq_0823', 'CAAGCAGAAGACGGCATACGAGAT', 'CACGTTTATTCC',
'AGTCAGTCAG', 'CA', 'TGATCCTTCTGCAGGTTCACCTAC',
('CAAGCAGAAGACGGCATACGAGATCACGTTTATTCCAGTCAG'
'TCAGCATGATCCTTCTGCAGGTTCACCTAC')],
['X00180464', 'B', '6', False, 'THDMI_10317_PUK2', 'THDMI_10317',
'THDMI_10317_UK2-US6', 'B6', '4', '10', 'AS', '166032128',
'Carmen_HOWE_KF4', '109379Z', '2021-08-17', '978215', 'RNBJ0628',
'Echo550', 'THDMI_US_Plate_6', 'THDMI US', '', '10', 'A3',
'EukBr_Hiseq_0893', 'CAAGCAGAAGACGGCATACGAGAT', 'GTATGGAGCTAT',
'AGTCAGTCAG', 'CA', 'TGATCCTTCTGCAGGTTCACCTAC',
('CAAGCAGAAGACGGCATACGAGATGTATGGAGCTATAGTCAG'
'TCAGCATGATCCTTCTGCAGGTTCACCTAC')],
['X00179796', 'F', '10', False, 'THDMI_10317_PUK2', 'THDMI_10317',
'THDMI_10317_UK2-US6', 'F10', '4', '10', 'AS', '166032128',
'Carmen_HOWE_KF4', '109379Z', '2021-08-17', '978215', 'RNBJ0628',
'Echo550', 'THDMI_US_Plate_6', 'THDMI US', '', '10', 'C5',
'EukBr_Hiseq_0919', 'CAAGCAGAAGACGGCATACGAGAT', 'CAGCCTGCAAAT',
'AGTCAGTCAG', 'CA', 'TGATCCTTCTGCAGGTTCACCTAC',
('CAAGCAGAAGACGGCATACGAGATCAGCCTGCAAATAGTCAG'
'TCAGCATGATCCTTCTGCAGGTTCACCTAC')]
]
data3 = [
['X00180471', 'A', '1', False, 'THDMI_10317_PUK2', 'THDMI_10317',
'THDMI_10317_UK2-US6', 'A1', '1', '5', 'SF', '166032128',
'Carmen_HOWE_KF3', '109379Z', '2021-08-17', '978215', 'RNBJ0628',
'Echo550', 'THDMI_UK_Plate_2', 'THDMI UK', '', '5', 'A1',
'kabir_ITS2rcbc384', 'CAAGCAGAAGACGGCATACGAGAT', 'GGAATTATCGGT',
'', 'CG', 'GCTGCGTTCTTCATCGATGC',
('CAAGCAGAAGACGGCATACGAGATGGAATTAT'
'CGGTCGGCTGCGTTCTTCATCGATGC')],
['X00180199', 'A', '4', False, 'THDMI_10317_PUK2', 'THDMI_10317',
'THDMI_10317_UK2-US6', 'A4', '2', '6', 'AS', '166032128',
'Carmen_HOWE_KF4', '109379Z', '2021-08-17', '978215', 'RNBJ0628',
'Echo550', 'THDMI_UK_Plate_3', 'THDMI UK', '', '6', 'A2',
'kabir_ITS2rcbc481', 'CAAGCAGAAGACGGCATACGAGAT', 'TGACGTAGAACT',
'', 'CG', 'GCTGCGTTCTTCATCGATGC',
('CAAGCAGAAGACGGCATACGAGATTGACGTAG'
'AACTCGGCTGCGTTCTTCATCGATGC')],
['X00179789', 'D', '7', False, 'THDMI_10317_PUK2', 'THDMI_10317',
'THDMI_10317_UK2-US6', 'D7', '3', '9', 'MB_SF', '166032128',
'Carmen_HOWE_KF3', '109379Z', '2021-08-17', '978215', 'RNBJ0628',
'Echo550', 'THDMI_UK_Plate_4', 'THDMI UK', '', '9', 'B4',
'kabir_ITS2rcbc783', 'CAAGCAGAAGACGGCATACGAGAT', 'ATCGATCCACAG',
'', 'CG', 'GCTGCGTTCTTCATCGATGC',
('CAAGCAGAAGACGGCATACGAGATATCGATCC'
'ACAGCGGCTGCGTTCTTCATCGATGC')],
['X00180201', 'F', '9', False, 'THDMI_10317_PUK2', 'THDMI_10317',
'THDMI_10317_UK2-US6', 'F9', '3', '9', 'MB_SF', '166032128',
'Carmen_HOWE_KF3', '109379Z', '2021-08-17', '978215', 'RNBJ0628',
'Echo550', 'THDMI_UK_Plate_4', 'THDMI UK', '', '9', 'C5',
'kabir_ITS2rcbc796', 'CAAGCAGAAGACGGCATACGAGAT', 'TCCAGGGCTATA',
'', 'CG', 'GCTGCGTTCTTCATCGATGC',
('CAAGCAGAAGACGGCATACGAGATTCCAGGGC'
'TATACGGCTGCGTTCTTCATCGATGC')],
['X00180464', 'B', '6', False, 'THDMI_10317_PUK2', 'THDMI_10317',
'THDMI_10317_UK2-US6', 'B6', '4', '10', 'AS', '166032128',
'Carmen_HOWE_KF4', '109379Z', '2021-08-17', '978215', 'RNBJ0628',
'Echo550', 'THDMI_US_Plate_6', 'THDMI US', '', '10', 'A3',
'kabir_ITS2rcbc866', 'CAAGCAGAAGACGGCATACGAGAT', 'ACATGTCACGTG',
'', 'CG', 'GCTGCGTTCTTCATCGATGC',
('CAAGCAGAAGACGGCATACGAGATACATGTCA'
'CGTGCGGCTGCGTTCTTCATCGATGC')],
['X00179796', 'F', '10', False, 'THDMI_10317_PUK2', 'THDMI_10317',
'THDMI_10317_UK2-US6', 'F10', '4', '10', 'AS', '166032128',
'Carmen_HOWE_KF4', '109379Z', '2021-08-17', '978215', 'RNBJ0628',
'Echo550', 'THDMI_US_Plate_6', 'THDMI US', '', '10', 'C5',
'kabir_ITS2rcbc892', 'CAAGCAGAAGACGGCATACGAGAT', 'ACGTGAGGAACG',
'', 'CG', 'GCTGCGTTCTTCATCGATGC',
('CAAGCAGAAGACGGCATACGAGATACGTGAGG'
'AACGCGGCTGCGTTCTTCATCGATGC')]
]
exp1 = pd.DataFrame(
columns=['Sample', 'Row', 'Col', 'Blank', 'Project Plate',
'Project Name', 'Compressed Plate Name', 'Well',
'Plate Position', 'Primer Plate #', 'Plating',
'Extraction Kit Lot', 'Extraction Robot',
'TM1000 8 Tool',
'Primer Date', 'MasterMix Lot', 'Water Lot',
'Processing Robot', 'Sample Plate', 'Project_Name',
'Original Name', 'Plate',
'EMP Primer Plate Well', 'Name',
"Illumina 5prime Adapter", 'Golay Barcode',
'Forward Primer Pad', 'Forward Primer Linker',
'515FB Forward Primer (Parada)', 'Primer For PCR'],
data=data1
)
exp2 = pd.DataFrame(
columns=['Sample', 'Row', 'Col', 'Blank', 'Project Plate',
'Project Name', 'Compressed Plate Name', 'Well',
'Plate Position', 'Primer Plate #', 'Plating',
'Extraction Kit Lot', 'Extraction Robot',
'TM1000 8 Tool',
'Primer Date', 'MasterMix Lot', 'Water Lot',
'Processing Robot', 'Sample Plate', 'Project_Name',
'Original Name', 'Plate',
'EMP Primer Plate Well', 'Name',
'Reverse complement of 3prime Illumina Adapter',
'Golay Barcode',
'Reverse Primer Pad', 'Reverse Primer Linker',
'Reverse primer (EukBr)', 'Primer For PCR'],
data=data2
)
exp3 = pd.DataFrame(
columns=['Sample', 'Row', 'Col', 'Blank', 'Project Plate',
'Project Name', 'Compressed Plate Name', 'Well',
'Plate Position', 'Primer Plate #', 'Plating',
'Extraction Kit Lot', 'Extraction Robot',
'TM1000 8 Tool',
'Primer Date', 'MasterMix Lot', 'Water Lot',
'Processing Robot', 'Sample Plate', 'Project_Name',
'Original Name', 'Plate',
'EMP Primer Plate Well', 'Name',
'Reverse complement of 3prime Illumina Adapter',
'Golay Barcode',
'Reverse Primer Pad', 'Reverse Primer Linker',
'ITS2 Reverse Primer', 'Primer For PCR'],
data=data3
)
pd.testing.assert_frame_equal(obs1, exp1)
pd.testing.assert_frame_equal(obs2, exp2)
# coding=utf-8
import os
import os.path
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from loganalysis.const import *
class Log(object):
''' Interface class for analysing logs from the scheduling module.
Provides three kinds of functionality:
a) presenting information
b) detecting problems
c) locating (root-causing) problems
All log files are required to follow the EI naming convention: subsystem_time.csv
'''
def __init__(self, directory, time_interval=None, product_type='Micro'):
'''Initialize a Log instance and group all logs by type.
Args:
directory: directory containing the logs
time_interval: time range [start, end] in yyyymmddhhmmss format
product_type: product type, one of ['Macro', 'Micro']; defaults to Micro
'''
self._directory = directory
self._product_type = product_type
self._logfiles={}
self._time_interval = time_interval
@property
def product_type(self):
return self._product_type
@property
def directory(self):
return self._directory
def _filenames_of_type(self, filetype):
'''Get all file names of the given file type.
Args:
filetype: file type
time_interval: time range [start, end] in yyyymmddhhmmss format
Returns:
list of file names
'''
names_of_filetype = []
for name in np.sort(os.listdir(self._directory)):
if not name.endswith(r'.csv'):
continue
if -1 == name.find(filetype):
continue
if self._time_interval:
time = np.uint64(name.rsplit(r'.')[0].rsplit(r'_')[-1])
if time < self._time_interval[0] or time > self._time_interval[1]:
continue
names_of_filetype.append(name)
return names_of_filetype
def describle(self):
'''Overall description of the relevant log files in the current directory; each log type is merged into one entry.
Reports the file name, size, line count, PC-time range, airtime range, etc., one entry per log type.
'''
df = pd.DataFrame()
for type, logfile in self._logfiles.items():
df.at[type, 'size'] = logfile.size
df.at[type, 'num_of_files'] = len(logfile.files)
df.at[type, 'num_of_lines'] = logfile.lines
df.at[type, 'pctime_start'] = logfile.pctimes[0]
df.at[type, 'pctime_end'] = logfile.pctimes[1]
df.at[type, 'airtime_start'] = logfile.airtimes[0]
df.at[type, 'airtime_end'] = logfile.airtimes[1]
df.index.name = 'filename'
return df
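# (added) Minimal usage sketch for the interface above; the directory and the
# time window are made-up example values, not taken from a real deployment.
def _demo_log_overview():
    log = Log(r'./ei_logs', time_interval=[20200101000000, 20200102000000])
    return log.describle()  # one summary row per log type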
class LogFile(object):
'''Interface class for a single type of log file.'''
def __init__(self, type, directory, files, id_filter=None):
'''Initialize a LogFile instance.
Args:
files: list of file names
type: log type
'''
self._files = files
self._type = type
self._directory = directory
self._id_filter = id_filter
self._time_filter = None
self._size = sum([os.path.getsize(os.path.join(directory, file)) for file in files])
self._pctimes = [-1, -1]
self._airtimes = [-1, -1]
self._lines = 0
cols = ['LocalTime', 'AirTime']
for data in self.gen_of_cols(cols):
if len(data.index) == 0:
self._lines = 0
return
self._lines = self._lines + data.index.max()
if self._pctimes[0] == -1:
self._pctimes[0] = data.iat[0, 0]
self._pctimes[1] = data.iat[-1, 0]
if self._airtimes[0] == -1:
self._airtimes[0] = data.iat[0, 1]
self._airtimes[1] = data.iat[-1, 1]
@property
def type(self):
return self._type
@property
def files(self):
return self._files
@property
def size(self):
return self._size
@property
def id_filter(self):
return self._id_filter
@property
def lines(self):
'''Total number of lines across the files.'''
return self._lines
@property
def pctimes(self):
'''PC-time range.'''
return tuple(self._pctimes)
@property
def airtimes(self):
'''AirTime range.'''
return tuple(self._airtimes)
@staticmethod
def addtime(time1, time2):
time1 = np.uint32(time1)
time2 = np.uint32(time2)
frm = time1 // 16 + time2 // 16
subfrm = time1 % 16 + time2 % 16
if subfrm >= 10:
subfrm -= 10
frm += 1
return frm % 0x10000000 * 16 + subfrm
@staticmethod
def difftime(time1, time2):
time1 = np.uint32(time1)
time2 = np.uint32(time2)
subfrm1 = time1 % 16
subfrm2 = time2 % 16
frm = time1 // 16 + 0x10000000 - time2 // 16
if subfrm1 >= subfrm2:
subfrm = subfrm1 - subfrm2
else:
subfrm = subfrm1 + 10 - subfrm2
frm = frm - 1
frm = frm % 0x10000000
return frm * 16 + subfrm
@staticmethod
def dectime(hextime):
hextime = np.uint32(hextime)
return hextime // 16 * 10 + hextime % 16
@staticmethod
def hextime(dectime):
dectime = np.uint32(dectime)
return dectime // 10 * 16 + dectime % 10
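# (added) Note on the packed AirTime format handled by the four helpers above:
# the frame number and the sub-frame (0-9) are packed as frame*16 + subframe,
# so the sub-frame always sits in the low hex digit. Worked example:
#   frame=5, subframe=3 -> packed = 5*16 + 3 = 83 (0x53)
#   dectime(83) = 83//16*10 + 83%16 = 53   (human-readable decimal form)
#   hextime(53) = 53//10*16 + 53%10 = 83   (back to the packed form)
#   addtime(83, 8): sub-frames 3+8=11 carry into the next frame -> 6*16+1 = 97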
def gen_of_cols(self, cols=None, val_filter=None):
'''Generator over the requested columns, yielding one chunk per file.
Args:
cols: list of column names; None means all columns
val_filter: filter conditions as a dict {'colname': [val1, ]}
Yields:
DataFrame chunks
'''
filters = {}
if val_filter:
filters.update(val_filter)
if self._id_filter:
filters.update(self._id_filter)
aircol = 'AirTime'
if cols is None:
    totcols = None
else:
    # read the requested columns plus any filter columns, and keep AirTime
    # available whenever a time window has been set
    needed = set(cols) | set(filters)
    if self._time_filter:
        needed.add(aircol)
    totcols = list(needed)
for file in self._files:
filename = os.path.join(self._directory, file)
data = pd.read_csv(filename, na_values='-', usecols=totcols)
if self._time_filter:
start, end = self._time_filter
data = data[(start<= data[aircol]) & (data[aircol]<=end)]
if not filters:
yield data
continue
mask = data[list(filters.keys())].isin(filters).all(1)
if cols is not None:
yield data[mask][cols]
else:
yield data[mask]
def get_filename_by_airtime(self, airtime):
'''Find the file whose airtime range covers the given airtime.
Args:
airtime: packed airtime value
Returns:
the file name, or None if no file covers it
'''
col = ['AirTime']
for file in self._files:
filename = os.path.join(self._directory, file)
data = pd.read_csv(filename, na_values='-', usecols=col)[col[0]]
if airtime < data.iat[0] or airtime > data.iat[-1]:
continue
return file
def get_data_between_airtimes(self, start_airtime, end_airtime, cols=None, val_filter=None):
'''Get the data inside the given airtime range.
Args:
start_airtime: start time
end_airtime: end time
cols: list of column names; None means all columns
val_filter: filter conditions as a dict {'colname': [val1, ]}
Returns:
data as a DataFrame
'''
assert(start_airtime <= end_airtime)
rlt = pd.DataFrame()
if cols is not None:
totcols = list(set(cols + ['AirTime']))
else:
totcols = None
for data in self.gen_of_cols(cols=totcols, val_filter=val_filter):
airtime = data['AirTime'].astype(np.uint32)
if start_airtime > airtime.iat[-1] or end_airtime < airtime.iat[0]:
continue
data = data[(airtime <= end_airtime) & (airtime >= start_airtime)]
rlt = pd.concat([rlt, data], ignore_index=True)
return rlt
def set_airtimes_interval(self, start, end):
'''Restrict this log to the given airtime range.
Args:
start: start time
end: end time
Returns:
None
'''
assert(start <= end)
self._time_filter = (start, end)
return
def reset_airtimes_interval(self):
'''Reset (clear) the airtime range restriction of this log.
Args:
None
Returns:
None
'''
self._time_filter = None
return
def get_data_of_cols(self, cols, val_filter=None):
'''Get the data of the requested columns.
Args:
cols: list of column names
val_filter: filter conditions as a dict {'colname': [val1, ]}
Returns:
data as a DataFrame
'''
rlt = pd.DataFrame()
import math
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.optimize import curve_fit
def importSHGdata(names,inputpath,outputpath):
#open file for writing scans to
fScans = open(outputpath+'scans.txt','w+')
#initialize data frames to hold data
countsA = pd.DataFrame()
countsB = pd.DataFrame()
import logging
import pandas as pd
from lib.constant import Datasets
from lib.features.dtypes import dtypes_clean, dtypes_featured
# features computing functions
def _compute_acc_severity(acc_severities: pd.Series) -> str:
"""Groupby method.
Return the worst victim state for each accident.
"""
# all_severity in ['safe', inj_light', 'inj_hosp', 'killed']
acc_severities_unique = acc_severities.unique()
if 'killed' in acc_severities_unique:
max_severity = 'killed'
elif 'inj_hosp' in acc_severities_unique:
max_severity = 'inj_hosp'
elif 'inj_light' in acc_severities_unique:
max_severity = 'inj_light'
else:
max_severity = 'safe'
return max_severity
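def _demo_compute_acc_severity() -> pd.Series:
    """(added) Illustrative sketch only: shows how _compute_acc_severity acts as
    a groupby aggregator. The two accidents below are made-up rows."""
    users = pd.DataFrame({'Num_Acc': ['a1', 'a1', 'a2'],
                          'grav': ['safe', 'killed', 'inj_light']})
    # expected result: a1 -> 'killed', a2 -> 'inj_light'
    return users.groupby('Num_Acc')['grav'].agg(_compute_acc_severity)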
def get_pct_drivers_by_sex(drivers_count: pd.DataFrame) -> pd.DataFrame:
"""Nombre de conducteur dans la population en pourcentage par sex."""
pct_drivers_sex = drivers_count \
.mean()[['prop_drive_male', 'prop_drive_female']] \
.mul(100).round(1)
return pct_drivers_sex
def get_summary_by_sex(drivers: pd.DataFrame, pct_drivers_sex: pd.DataFrame) -> pd.DataFrame:
summary = pd.crosstab(index=drivers['acc_severity'],
columns=drivers['sexe'],
margins=True, normalize=0) \
.mul(100).round(1) \
.append(pd.DataFrame({'female': pct_drivers_sex['prop_drive_female'],
'male': pct_drivers_sex['prop_drive_male']},
index=['Prop. conducteurs'])) \
.rename({'All': 'Tous accidents',
'killed': 'Acc. mortel',
'inj_light': 'Acc. leger',
'inj_hosp': 'Acc.grave'}, axis=0) \
.rename({'female': 'Femmes', 'male': 'Hommes'}, axis=1)
return summary
def get_drivers(acc_severity: pd.DataFrame, users: pd.DataFrame) -> pd.DataFrame:
"""Retourne l'ensemble des conducteurs impliqué dans un accident.
on ajoute à la table des usager la colonne correspondant au type d'accident
dans lequel il est impliqué (= état de la victime la plus grave)
on ne récupère de cette table que les usagers conducteurs
"""
drivers = pd.merge(acc_severity, users.loc[users['catu'] == 'driver', :],
on='Num_Acc', how='left') \
.loc[:, ['Num_Acc', 'acc_severity', 'sexe', 'trajet']]
return drivers
# build new datasets
def build_accidents_dataset(caracs: pd.DataFrame,
locations: pd.DataFrame,
users: pd.DataFrame,
dtypes_base_path: str = '') -> pd.DataFrame:
logging.info('merge caracs with locations (1 accident = 1 carac = 1 location)')
acc_df = pd.merge(caracs, locations, on='Num_Acc', how='inner')
logging.info('add nb victims by severity (4 columns)')
vict_by_severity_cnt = pd.merge(caracs, users, on='Num_Acc', how='inner') \
.groupby(by=['Num_Acc', 'grav']).count().reset_index()[['Num_Acc', 'grav', 'year']] \
.rename({'year': 'victims_nb'}, axis=1) \
.fillna({'victims_nb': 0}) \
.pivot(index='Num_Acc', columns='grav')
vict_by_severity_cnt.columns = vict_by_severity_cnt.columns.get_level_values(1)
vict_by_severity_cnt = vict_by_severity_cnt.rename(columns=str).reset_index()
acc_df = pd.merge(acc_df, vict_by_severity_cnt, on='Num_Acc', how='inner')
logging.info('add total victims number column')
victims_nb_by_acc = pd.merge(caracs, users, on='Num_Acc', how='inner') \
.groupby(by=['Num_Acc']).count().reset_index()[['Num_Acc', 'year']] \
.rename({'year': 'victims_nb'}, axis=1)
acc_df = pd.merge(acc_df, victims_nb_by_acc, on='Num_Acc', how='inner')
logging.info('add acc_severity column')
acc_severity = pd.merge(acc_df, users, on='Num_Acc', how='inner') \
.loc[:, ['Num_Acc', 'grav']] \
.groupby(by='Num_Acc') \
.agg(acc_severity=pd.NamedAgg(column='grav', aggfunc=_compute_acc_severity)) \
.reset_index() \
.astype({'acc_severity': 'category', 'Num_Acc': 'string'})
acc_df = pd.merge(acc_df, acc_severity, on='Num_Acc', how='inner')
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 12 16:15:46 2020
@author: navarrenhn
"""
import pandas as pd
def feed_demand(groups, Lancet_diet):
Region_demands = {}
for name, group in groups:
d = Lancet_diet.copy()
d["GROUP"] = name
#print(d)
##create animal product demands:
d.loc[["beef and lamb"], ["BMI" , "EAT", "Org"]] *= (1 + group.loc[group["Foodtype"] == "Meat", "Total"].min())
d.loc[["pork"], ["BMI" , "EAT", "Org"]] *= (1 + group.loc[group["Foodtype"] == "Meat", "Total"].min())
d.loc[["chicken and other poultry"], ["BMI" , "EAT", "Org"]] *= (1 + group.loc[group["Foodtype"] == "Meat", "Total"].min())
d.loc[["fish"], ["BMI" , "EAT", "Org"]] *= (1 + group.loc[group["Foodtype"] == "Fishandseafood", "Total"].min())
#d = d.drop(["fish"])
#d.loc[["whole milk or derivative equivalents"], ["BMI" , "EAT", "Org"]] *= (1 + group.loc[group["Foodtype"] == "Milk", "Total"].min())
#d.loc[["eggs"], ["BMI" , "EAT", "Org"]] *= (1 + group.loc[group["Foodtype"] == "Milk", "Total"].min())
##create feed demands:
##need to determine oil production from soymeal production for feed
##feed required for 1 dairy cow per day
##similar feed is assumed for beef cattle being reared on mixed system (pasture/crop-fed)
##all concentrates except corn and soy assumed to be by-products
##soymeal in concentrate equals 0.8 times fresh soybeans in weight (soymeal.org)
##corn equals sum of fresh and corn in concentrate
##corn (maize grain) in concentrate equals 0.86 times fresh yield? Based on dry mass?
cow_dict = {"type": ["grass", "corn", "soybean meal"], ##"citrus pulp concentrate", "palm kernel meal concentrate", "rapeseed meal concentrate", "beet pulp concentrate", "wheat concentrate", "rest products"],
"gram": [41250, 13750 + (1250*0.86), 750*0.8 ]}##, 500, 500, 500, 250, 250, 1000]}
cow_Lancet_diet_per_day = pd.DataFrame(cow_dict)
cow_Lancet_diet_per_day = cow_Lancet_diet_per_day.set_index(["type"])
cow_feed_per_g_milk = cow_Lancet_diet_per_day["gram"]/25000
##beef production from dairy cows and dairy calf rearing
calf_per_g_milk = (1.5/(25000*365*6)) ##3 calves per cow divided by two as only males are used(?)
##Type A calf of Nguyen 2010 using 8438 kg of feed per 1000 kg carcass weight (= per 660kg edible meat)
##(significantly more soymeal --> look into, maybe change Lancet_diet)
g_calf_per_g_milk = calf_per_g_milk * 214880
cow_feed_per_g_calf = ((cow_Lancet_diet_per_day["gram"]/55000)*8438000)/660000
##One 680 kg Holstein dairy cow delivers 224.52 kg of meat (excluding offal and bones) ##what to do with offal?
g_dairycow_beef_per_g_milk = 224520.0 / 36500000.0 #36500000 g milk in her milk giving time of 4 years
g_beef_per_g_milk = g_calf_per_g_milk + g_dairycow_beef_per_g_milk
##feed demand from classic suckler-cow rearing systems is 20863 kg of feed per 1000kg carcass weight (= per 660kg edible meat) (Nguyen 2010)
cow_feed_per_g_suckler_beef = ((cow_Lancet_diet_per_day["gram"]/55000)*20863000)/660000
##required extra beef production besides dairy cows and their calves to reach demand
required_extra_beef_production = max(d.loc[["beef and lamb"], ["BMI" , "EAT", "Org"]].values[0][0] - (d.loc[["whole milk or derivative equivalents"], ["BMI" , "EAT", "Org"]].values[0][0] * g_beef_per_g_milk), 0)
##this needs a lamb factor
total_feed_cows_for_Lancet_diet_per_day = (d.loc[["whole milk or derivative equivalents"], ["BMI" , "EAT", "Org"]].values[0][0] * g_calf_per_g_milk * cow_feed_per_g_calf) + (d.loc[["whole milk or derivative equivalents"], ["BMI" , "EAT", "Org"]].values[0][0] * cow_feed_per_g_milk) + (required_extra_beef_production * cow_feed_per_g_suckler_beef)
##one dutch cow delivers on average 25 liter milk per day and eats 55kg of feed a day
##assuming 3 calves per dairy cow of which half is male so used for slaughter
##one dutch dairy cow is culled after 6 years on average
##if not, how much feed does a meat cow need?
##how much manure do the cows produce? (for effect on N input ratio)
##soybean meal assumed to equal 0.8 times fresh soybean weight as in cow Lancet_diet
##whole grains assumed here
##one dutch egg-laying chicken lays 0.85232877 egg per day amounting to 19400/311.1 = 62.35937 gram egg per day
##one dutch chicken eats 121.3 gram feed per day (both broiler and egg)
##chicken feed based on Rezaei et al (high protein organic Lancet_diet) and ratios based on 1/3 of feeds used in first and 2/3 of last stages of life, byproducts and supplements (under 3%) placed in "other"
##one dutch broiler chicken lives 6 weeks, averages 2446g and delivers 166+547+243+520 = 1476 gram of meat
##is chicken manure used as fertilizer? How much manure does a chicken produce?
chicken_dict = {"type": ["wheat", "soybean meal", "rapeseed", "oats", "peas"], ##"other"],
"gram": [45.95, 21.62*0.8, 4.04, 23.15, 9.7]} ##, 16.84]}
chicken_Lancet_diet_per_day = pd.DataFrame(chicken_dict)
chicken_Lancet_diet_per_day = chicken_Lancet_diet_per_day.set_index(["type"])
chicken_feed_per_g_meat = (chicken_Lancet_diet_per_day["gram"]*42)/1476
chicken_feed_per_g_egg = chicken_Lancet_diet_per_day["gram"]/62.35937
total_feed_meat_chickens_for_Lancet_diet_per_day = chicken_feed_per_g_meat * d.loc[["chicken and other poultry"], ["BMI" , "EAT", "Org"]].values[0][0]
total_feed_egg_chickens_for_Lancet_diet_per_day = chicken_feed_per_g_egg * d.loc[["eggs"], ["BMI" , "EAT", "Org"]].values[0][0]
##feed required for 1 lamb per day
##all concentrates except corn and soy assumed to be by-products
##soymeal in concentrate equals 0.8 times fresh soybeans in weight (soymeal.org)
##corn (maize grain) in concentrate equals 0.86 times fresh yield? Based on dry mass?
##one lamb gives 35.24% of its original weight as meat. One slaughtered lamb weighs 40kg so 40* 0.3524 = 14.096 kg meat per lamb
##feed composition assumed to be similar to milk cow (both pasture raised and ruminants).Feed requirement about 1kg a day (Bello et al, 2016)
##manure production
lamb_dict = {"type": ["grass", "corn", "soybean meal"], ##"citrus pulp concentrate", "palm kernel meal concentrate", "rapeseed meal concentrate", "beet pulp concentrate", "wheat concentrate", "rest products"],
"gram": [687.5, 312.5 + (20.8*0.86), 12.5*0.8]} ##, 8.33, 8.33, 8.33, 4.15, 4.15, 16.66]}
lamb_Lancet_diet_per_day = pd.DataFrame(lamb_dict)
lamb_Lancet_diet_per_day = lamb_Lancet_diet_per_day.set_index(["type"])
lamb_feed_per_g_meat = (lamb_Lancet_diet_per_day["gram"]*365)/14096
total_feed_lamb_for_Lancet_diet_per_day = lamb_feed_per_g_meat * d.loc[["beef and lamb"], ["BMI" , "EAT", "Org"]].values[0][0]
##need to add beef/lamb ratio
##one slaughtered pig gives on average 57% of its live weight as meat, slaughtered weight is 95.2kg so 95.2*0.57 = 54.264kg meat per fattening pig
##one pig lives 88 days (based on BINternet growth per day) and uses 185,064kg of feed in its life (based on BINternet feed conversion) so eats 2,103kg of feed a day
##feed requirement based on byproducts scenario of Lassaletta et al 2016
##manure production
##swill and molasses assumed to be by-products
##are brans a by-product? Do they require extra production? Assumed to be about 10% of original crop (Feedipedia)
pig_dict = {"type": ["corn", "barley", "brans", "wheat"], ##"swill", "molasses"],
"gram": [378.54, 147.21, 525.75, 630.9]} ##, 210.3, 210.3]}
pig_Lancet_diet_per_day = pd.DataFrame(pig_dict)
pig_Lancet_diet_per_day = pig_Lancet_diet_per_day.set_index(["type"])
pig_feed_per_g_meat = (pig_Lancet_diet_per_day["gram"]*88)/54264
total_feed_pig_for_Lancet_diet_per_day = pig_feed_per_g_meat * d.loc[["pork"], ["BMI" , "EAT", "Org"]].values[0][0]
##create crop demands including demand for feed crops:
##assuming no waste in feedcrops
d.loc[["rice wheat corn and other"], ["BMI" , "EAT", "Org"]] *= (1 + group.loc[group["Foodtype"] == "Cereals", "Total"].min())
d.loc[["rice wheat corn and other"], ["BMI" , "EAT", "Org"]] += total_feed_cows_for_Lancet_diet_per_day.loc["corn"] + total_feed_meat_chickens_for_Lancet_diet_per_day.loc["wheat"] + total_feed_meat_chickens_for_Lancet_diet_per_day.loc["oats"] + total_feed_egg_chickens_for_Lancet_diet_per_day.loc["wheat"] + total_feed_egg_chickens_for_Lancet_diet_per_day.loc["oats"] + total_feed_lamb_for_Lancet_diet_per_day.loc["corn"] + total_feed_pig_for_Lancet_diet_per_day.loc["corn"] + total_feed_pig_for_Lancet_diet_per_day.loc["barley"] + total_feed_pig_for_Lancet_diet_per_day.loc["wheat"]
d.loc[["potatoes and cassava"], ["BMI" , "EAT", "Org"]] *= (1 + group.loc[group["Foodtype"] == "Rootsandtubers", "Total"].min())
d.loc[["dry beans lentils and peas"], ["BMI" , "EAT", "Org"]] *= (1 + group.loc[group["Foodtype"] == "Oilseedsandpulses", "Total"].min())
d.loc[["dry beans lentils and peas"], ["BMI" , "EAT", "Org"]] += total_feed_meat_chickens_for_Lancet_diet_per_day.loc["peas"] + total_feed_egg_chickens_for_Lancet_diet_per_day.loc["peas"]
d.loc[["soy foods"], ["BMI" , "EAT", "Org"]] *= (1 + group.loc[group["Foodtype"] == "Oilseedsandpulses", "Total"].min())
d.loc[["soy foods"], ["BMI" , "EAT", "Org"]] += total_feed_cows_for_Lancet_diet_per_day.loc["soybean meal"] + total_feed_lamb_for_Lancet_diet_per_day.loc["soybean meal"] + total_feed_meat_chickens_for_Lancet_diet_per_day.loc["soybean meal"] + total_feed_egg_chickens_for_Lancet_diet_per_day.loc["soybean meal"]
d.loc[["peanuts"], ["BMI" , "EAT", "Org"]] *= (1 + group.loc[group["Foodtype"] == "Oilseedsandpulses", "Total"].min())
d.loc[["tree nuts"], ["BMI" , "EAT", "Org"]] *= (1 + group.loc[group["Foodtype"] == "Oilseedsandpulses", "Total"].min())
#d.loc[["palm oil"], ["BMI" , "EAT", "Org"]] *= (1 + group.loc[group["Foodtype"] == "Oilseedsandpulses", "Total"].min())
d.loc[["unsaturated oils"], ["BMI" , "EAT", "Org"]] *= (1 + group.loc[group["Foodtype"] == "Oilseedsandpulses", "Total"].min())
d.loc[["unsaturated oils"], ["BMI" , "EAT", "Org"]] += total_feed_meat_chickens_for_Lancet_diet_per_day.loc["rapeseed"] + total_feed_egg_chickens_for_Lancet_diet_per_day.loc["rapeseed"]
d.loc[["all fruit"], ["BMI" , "EAT", "Org"]] *= (1 + group.loc[group["Foodtype"] == "Fruitsandvegetables", "Total"].min())
#d.loc[["all vegetables"], ["BMI" , "EAT", "Org"]] *= (1 + group.loc[group["Foodtype"] == "Fruitsandvegetables", "Total"].min())
d.loc[["dark green vegetables"], ["BMI" , "EAT", "Org"]] *= (1 + group.loc[group["Foodtype"] == "Fruitsandvegetables", "Total"].min())
d.loc[["red and orange vegetables"], ["BMI" , "EAT", "Org"]] *= (1 + group.loc[group["Foodtype"] == "Fruitsandvegetables", "Total"].min())
Region_demands[name] = d.loc[(Lancet_diet["GROUP"] == name)]
return Region_demands
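##(added) feed_remove below mirrors feed_demand, but it works on the 'Org_nf'
##column, subtracts the waste fractions (1 - ...) instead of adding them, and
##divides by the soymeal/maize conversion factors instead of multiplying.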
def feed_remove(groups, Lancet_diet):
Region_demands = {}
for name, group in groups:
d = Lancet_diet.copy()
d["GROUP"] = name
#print(d)
##create animal product demands:
d.loc[["beef and lamb"], ["Org_nf"]] *= (1 - group.loc[group["Foodtype"] == "Meat", "Total"].min())
d.loc[["pork"], ["Org_nf"]] *= (1 - group.loc[group["Foodtype"] == "Meat", "Total"].min())
d.loc[["chicken and other poultry"], ["Org_nf"]] *= (1 - group.loc[group["Foodtype"] == "Meat", "Total"].min())
d.loc[["fish"], ["Org_nf"]] *= (1 - group.loc[group["Foodtype"] == "Fishandseafood", "Total"].min())
#d = d.drop(["fish"])
#d.loc[["whole milk or derivative equivalents"], ["BMI" , "EAT", "Org"]] *= (1 + group.loc[group["Foodtype"] == "Milk", "Total"].min())
#d.loc[["eggs"], ["BMI" , "EAT", "Org"]] *= (1 + group.loc[group["Foodtype"] == "Milk", "Total"].min())
##create feed demands:
##need to determine oil production from soymeal production for feed
##feed required for 1 dairy cow per day
##similar feed is assumed for beef cattle being reared on mixed system (pasture/crop-fed)
##all concentrates except corn and soy assumed to be by-products
##soymeal in concentrate equals 0.8 times fresh soybeans in weight (soymeal.org)
##corn equals sum of fresh and corn in concentrate
##corn (maize grain) in concentrate equals 0.86 times fresh yield? Based on dry mass?
cow_dict = {"type": ["grass", "corn", "soybean meal"], ##"citrus pulp concentrate", "palm kernel meal concentrate", "rapeseed meal concentrate", "beet pulp concentrate", "wheat concentrate", "rest products"],
"gram": [41250, 13750 + (1250/0.86), 750/0.8 ]}##, 500, 500, 500, 250, 250, 1000]}
cow_Lancet_diet_per_day = pd.DataFrame(cow_dict)
cow_Lancet_diet_per_day = cow_Lancet_diet_per_day.set_index(["type"])
cow_feed_per_g_milk = cow_Lancet_diet_per_day["gram"]/25000
##beef production from dairy cows and dairy calf rearing
calf_per_g_milk = (1.5/(25000*365*6)) ##3 calves per cow divided by two as only males are used(?)
##Type A calf of Nguyen 2010 using 8438 kg of feed per 1000 kg carcass weight (= per 660kg edible meat)
##(significantly more soymeal --> look into, maybe change Lancet_diet)
g_calf_per_g_milk = calf_per_g_milk * 214880
cow_feed_per_g_calf = ((cow_Lancet_diet_per_day["gram"]/55000)*8438000)/660000
##One 680 kg Holstein dairy cow delivers 224.52 kg of meat (excluding offal and bones) ##what to do with offal?
g_dairycow_beef_per_g_milk = 224520.0 / 36500000.0 #36500000 g milk in her milk giving time of 4 years
g_beef_per_g_milk = g_calf_per_g_milk + g_dairycow_beef_per_g_milk
##feed demand from classic suckler-cow rearing systems is 20863 kg of feed per 1000kg carcass weight (= per 660kg edible meat) (Nguyen 2010)
cow_feed_per_g_suckler_beef = ((cow_Lancet_diet_per_day["gram"]/55000)*20863000)/660000
##required extra beef production besides dairy cows and their calves to reach demand
required_extra_beef_production = max(d.loc[["beef and lamb"], ["Org_nf"]].values[0][0] - (d.loc[["whole milk or derivative equivalents"], ["Org_nf"]].values[0][0] * g_beef_per_g_milk), 0)
##this needs a lamb factor
total_feed_cows_for_Lancet_diet_per_day = (d.loc[["whole milk or derivative equivalents"], ["Org_nf"]].values[0][0] * g_calf_per_g_milk * cow_feed_per_g_calf) + (d.loc[["whole milk or derivative equivalents"], ["Org_nf"]].values[0][0] * cow_feed_per_g_milk) + (required_extra_beef_production * cow_feed_per_g_suckler_beef)
##one dutch cow delivers on average 25 liter milk per day and eats 55kg of feed a day
##assuming 3 calves per dairy cow of which half is male so used for slaughter
##one dutch dairy cow is culled after 6 years on average
##if not, how much feed does a meat cow need?
##how much manure do the cows produce? (for effect on N input ratio)
##soybean meal assumed to equal 0.8 times fresh soybean weight as in cow Lancet_diet
##whole grains assumed here
##one dutch egg-laying chicken lays 0.85232877 egg per day amounting to 19400/311.1 = 62.35937 gram egg per day
##one dutch chicken eats 121.3 gram feed per day (both broiler and egg)
##chicken feed based on Rezaei et al (high protein organic Lancet_diet) and ratios based on 1/3 of feeds used in first and 2/3 of last stages of life, byproducts and supplements (under 3%) placed in "other"
##one dutch broiler chicken lives 6 weeks, averages 2446g and delivers 166+547+243+520 = 1476 gram of meat
##is chicken manure used as fertilizer? How much manure does a chicken produce?
chicken_dict = {"type": ["wheat", "soybean meal", "rapeseed", "oats", "peas"], ##"other"],
"gram": [45.95, 21.62/0.8, 4.04, 23.15, 9.7]} ##, 16.84]}
chicken_Lancet_diet_per_day = pd.DataFrame(chicken_dict)
chicken_Lancet_diet_per_day = chicken_Lancet_diet_per_day.set_index(["type"])
chicken_feed_per_g_meat = (chicken_Lancet_diet_per_day["gram"]*42)/1476
chicken_feed_per_g_egg = chicken_Lancet_diet_per_day["gram"]/62.35937
total_feed_meat_chickens_for_Lancet_diet_per_day = chicken_feed_per_g_meat * d.loc[["chicken and other poultry"], ["Org_nf"]].values[0][0]
total_feed_egg_chickens_for_Lancet_diet_per_day = chicken_feed_per_g_egg * d.loc[["eggs"], ["Org_nf"]].values[0][0]
##feed required for 1 lamb per day
##all concentrates except corn and soy assumed to be by-products
##soymeal in concentrate equals 0.8 times fresh soybeans in weight (soymeal.org)
##corn (maize grain) in concentrate equals 0.86 times fresh yield? Based on dry mass?
##one lamb gives 35.24% of its original weight as meat. One slaughtered lamb weighs 40kg so 40* 0.3524 = 14.096 kg meat per lamb
##feed composition assumed to be similar to milk cow (both pasture raised and ruminants).Feed requirement about 1kg a day (Bello et al, 2016)
##manure production
lamb_dict = {"type": ["grass", "corn", "soybean meal"], ##"citrus pulp concentrate", "palm kernel meal concentrate", "rapeseed meal concentrate", "beet pulp concentrate", "wheat concentrate", "rest products"],
"gram": [687.5, 312.5 + (20.8/0.86), 12.5/0.8]} ##, 8.33, 8.33, 8.33, 4.15, 4.15, 16.66]}
lamb_Lancet_diet_per_day = pd.DataFrame(lamb_dict)
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 13 15:21:55 2019
@author: raryapratama
"""
#%%
#Step (1): Import Python libraries, set land conversion scenarios general parameters
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import quad
import seaborn as sns
import pandas as pd
#PF_FP_S1 Scenario
##Set parameters
#Parameters for primary forest
initAGB = 233 #source: van Beijma et al. (2018)
initAGB_min = 233-72
initAGB_max = 233 + 72
#parameters for timber plantation. Source: Khasanah et al. (2015)
tf = 201
a = 0.082
b = 2.53
#%%
#Step (2_1): C loss from the harvesting/clear cut
df1 = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_EC.xlsx', 'RIL_S1')
#df2 = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL.xlsx', 'RIL_S2')
dfE = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_EC.xlsx', 'RIL_E')
t = range(0,tf,1)
c_firewood_energy_S1 = df1['Firewood_other_energy_use'].values
#c_loss_S2 = df2['C_loss'].values
c_firewood_energy_E = dfE['Firewood_other_energy_use'].values
#%%
#Step (2_2): C loss from the harvesting/clear cut as wood pellets
dfE = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_EC.xlsx', 'RIL_E')
c_pellets_E = dfE['Wood_pellets'].values
#%%
#Step (3): Aboveground biomass (AGB) decomposition
#S1
df = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_EC.xlsx', 'RIL_S1')
tf = 201
t = np.arange(tf)
def decomp_S1(t,remainAGB_S1):
return (1-(1-np.exp(-a*t))**b)*remainAGB_S1
#set zero matrix
output_decomp_S1 = np.zeros((len(t),len(df['C_remainAGB'].values)))
for i,remain_part_S1 in enumerate(df['C_remainAGB'].values):
#print(i,remain_part)
output_decomp_S1[i:,i] = decomp_S1(t[:len(t)-i],remain_part_S1)
print(output_decomp_S1[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_S1 = np.zeros((len(t)-1,len(df['C_remainAGB'].values-1)))
i = 0
while i < tf:
subs_matrix_S1[:,i] = np.diff(output_decomp_S1[:,i])
i = i + 1
print(subs_matrix_S1[:,:4])
print(len(subs_matrix_S1))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_S1 = subs_matrix_S1.clip(max=0)
print(subs_matrix_S1[:,:4])
#make the results as absolute values
subs_matrix_S1 = abs(subs_matrix_S1)
print(subs_matrix_S1[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_S1 = np.zeros((len(t)-200,len(df['C_remainAGB'].values)))
print(zero_matrix_S1)
subs_matrix_S1 = np.vstack((zero_matrix_S1, subs_matrix_S1))
print(subs_matrix_S1[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_S1 = (tf,1)
decomp_tot_S1 = np.zeros(matrix_tot_S1)
i = 0
while i < tf:
decomp_tot_S1[:,0] = decomp_tot_S1[:,0] + subs_matrix_S1[:,i]
i = i + 1
print(decomp_tot_S1[:,0])
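#(added) The decomposition block above is repeated almost verbatim for every
#scenario below (S1_C, E, ...). A minimal helper capturing the same steps could
#look like this sketch; it assumes a 'C_remainAGB' column and reuses the global
#a, b and tf defined at the top of this script.
def yearly_decomp_emissions(remain_agb, a=a, b=b, tf=tf):
    t = np.arange(tf)
    out = np.zeros((tf, len(remain_agb)))
    for i, remain in enumerate(remain_agb):
        out[i:, i] = (1 - (1 - np.exp(-a * t[:tf - i]))**b) * remain
    subs = np.abs(np.clip(np.diff(out, axis=0), None, 0))  # keep yearly losses only
    subs = np.vstack((np.zeros((1, out.shape[1])), subs))   # year 0 emits nothing
    return subs.sum(axis=1)                                 # total emission per year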
#S1_C
df = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_EC.xlsx', 'RIL_C_S1')
tf = 201
t = np.arange(tf)
def decomp_S1_C(t,remainAGB_S1_C):
return (1-(1-np.exp(-a*t))**b)*remainAGB_S1_C
#set zero matrix
output_decomp_S1_C = np.zeros((len(t),len(df['C_remainAGB'].values)))
for i,remain_part_S1_C in enumerate(df['C_remainAGB'].values):
#print(i,remain_part)
output_decomp_S1_C[i:,i] = decomp_S1_C(t[:len(t)-i],remain_part_S1_C)
print(output_decomp_S1_C[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_S1_C = np.zeros((len(t)-1,len(df['C_remainAGB'].values-1)))
i = 0
while i < tf:
subs_matrix_S1_C[:,i] = np.diff(output_decomp_S1_C[:,i])
i = i + 1
print(subs_matrix_S1_C[:,:4])
print(len(subs_matrix_S1_C))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_S1_C = subs_matrix_S1_C.clip(max=0)
print(subs_matrix_S1_C[:,:4])
#make the results as absolute values
subs_matrix_S1_C = abs(subs_matrix_S1_C)
print(subs_matrix_S1_C[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_S1_C = np.zeros((len(t)-200,len(df['C_remainAGB'].values)))
print(zero_matrix_S1_C)
subs_matrix_S1_C = np.vstack((zero_matrix_S1_C, subs_matrix_S1_C))
print(subs_matrix_S1_C[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_S1_C = (tf,1)
decomp_tot_S1_C = np.zeros(matrix_tot_S1_C)
i = 0
while i < tf:
decomp_tot_S1_C[:,0] = decomp_tot_S1_C[:,0] + subs_matrix_S1_C[:,i]
i = i + 1
print(decomp_tot_S1_C[:,0])
#E
df = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_EC.xlsx', 'RIL_E')
#Download the libraries
import sys
import nltk
import re
import pandas as pd
import numpy as np
import csv
import os
import random
import numpy as np
#from collections import Counter
from sklearn.externals import joblib
from nltk.sentiment.vader import SentimentIntensityAnalyzer
from collections import Counter
from textblob import TextBlob
from textblob import Blobber
from textblob.sentiments import NaiveBayesAnalyzer
data_t = sys.argv[1]
data_t = data_t.decode('ascii', 'replace')
data_t = data_t.lower()
# In[23]:
#Abbriviation translation
with open('./ML/Python/abbrev.csv', mode='r') as infile:
reader = csv.reader(infile)
replacement = {rows[0].lower(): rows[1].lower() for rows in reader}
# In[24]:
result = pd.DataFrame()
result = data_t
# In[25]:
# expand abbreviations (a single pass over the tokens is enough)
data_t = ' '.join([replacement.get(w, w) for w in data_t.split()])
# In[26]:
#lowercase
data_t = data_t.lower()
# str.replace is literal, so use re.sub for these regex patterns
# Remove urls
data_t = re.sub(r'(http.*) |(http.*)$|\n', "", data_t)
# Remove twitter handles
data_t = re.sub(r"@\w+", "", data_t)
# Remove htmls
data_t = re.sub(r'<.*?>', "", data_t)
# Remove citations
data_t = re.sub(r'@[a-zA-Z0-9]*', "", data_t)
# Remove underscores
#data_t = re.sub(r'\_+', "", data_t)
# In[27]:
vec = joblib.load('./ML/Python/sentiment/vec.pkl')
# In[28]:
data_tt = data_t
# In[29]:
data_tt = data_tt.split(" ")
data_text = pd.DataFrame(data_tt)
old = len(data_text)
names = vec.get_feature_names()
names_text = pd.DataFrame(names)
new = len(data_text.merge(names_text, how='inner'))
# In[ ]:
if (old == new):
sclf = joblib.load('./ML/Python/sentiment/stacking.pkl')
#Vectorization of the string
df3 = pd.Series(data_t)
"""
Created on Sun Mar 18 11:17:53 2018
@author: Mehul
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
## Load data
data_train = pd.read_csv('../dataset/Google_Stock_Price_Train.csv')
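# (added) Minimal continuation sketch, not part of the original script; it
# assumes the usual 'Open' price column of this dataset.
#training_set = data_train[['Open']].values
#sc = MinMaxScaler(feature_range=(0, 1))
#training_set_scaled = sc.fit_transform(training_set)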
#
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
from sklearn import preprocessing
from . import utils
from . import timeseries
from . import pos
from . import txn
from .utils import APPROX_BDAYS_PER_MONTH
from functools import wraps
def plotting_context(func):
"""Decorator to set plotting context during function call."""
@wraps(func)
def call_w_context(*args, **kwargs):
set_context = kwargs.pop('set_context', True)
if set_context:
with context():
return func(*args, **kwargs)
else:
return func(*args, **kwargs)
return call_w_context
def context(context='notebook', font_scale=1.5, rc=None):
"""Create pyfolio default plotting style context.
Under the hood, calls and returns seaborn.plotting_context() with
some custom settings. Usually you would use in a with-context.
Parameters
----------
context : str, optional
Name of seaborn context.
font_scale : float, optional
Scale font by factor font_scale.
rc : dict, optional
Config flags.
By default, {'lines.linewidth': 1.5,
'axes.facecolor': '0.995',
'figure.facecolor': '0.97'}
is being used and will be added to any
rc passed in, unless explicitly overridden.
Returns
-------
seaborn plotting context
Example
-------
>>> with pyfolio.plotting.context(font_scale=2):
>>> pyfolio.create_full_tear_sheet()
See also
--------
For more information, see seaborn.plotting_context().
"""
if rc is None:
rc = {}
rc_default = {'lines.linewidth': 1.5,
'axes.facecolor': '0.995',
'figure.facecolor': '0.97'}
# Add defaults if they do not exist
for name, val in rc_default.items():
rc.setdefault(name, val)
return sns.plotting_context(context=context, font_scale=font_scale,
rc=rc)
def plot_rolling_fama_french(
returns,
factor_returns=None,
rolling_window=APPROX_BDAYS_PER_MONTH * 6,
legend_loc='best',
ax=None, **kwargs):
"""Plots rolling Fama-French single factor betas.
Specifically, plots SMB, HML, and UMD vs. date with a legend.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
factor_returns : pd.DataFrame, optional
data set containing the Fama-French risk factors. See
utils.load_portfolio_risk_factors.
rolling_window : int, optional
The days window over which to compute the beta.
legend_loc : matplotlib.loc, optional
The location of the legend on the plot.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
num_months_str = '%.0f' % (rolling_window / APPROX_BDAYS_PER_MONTH)
ax.set_title(
"Rolling Fama-French Single Factor Betas (" +
num_months_str +
'-month)')
ax.set_ylabel('beta')
rolling_beta = timeseries.rolling_fama_french(
returns,
factor_returns=factor_returns,
rolling_window=rolling_window)
rolling_beta.plot(alpha=0.7, ax=ax, **kwargs)
ax.axhline(0.0, color='black')
ax.legend(['Small-Caps (SMB)',
'High-Growth (HML)',
'Momentum (UMD)'],
loc=legend_loc)
ax.set_ylim((-2.0, 2.0))
y_axis_formatter = FuncFormatter(utils.one_dec_places)
ax.yaxis.set_major_formatter(FuncFormatter(y_axis_formatter))
ax.axhline(0.0, color='black')
ax.set_xlabel('')
return ax
def plot_monthly_returns_heatmap(returns, ax=None, **kwargs):
"""
Plots a heatmap of returns by month.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to seaborn plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
monthly_ret_table = timeseries.aggregate_returns(returns,
'monthly')
monthly_ret_table = monthly_ret_table.unstack()
monthly_ret_table = np.round(monthly_ret_table, 3)
sns.heatmap(
monthly_ret_table.fillna(0) *
100.0,
annot=True,
annot_kws={
"size": 9},
alpha=1.0,
center=0.0,
cbar=False,
cmap=matplotlib.cm.RdYlGn,
ax=ax, **kwargs)
ax.set_ylabel('Year')
ax.set_xlabel('Month')
ax.set_title("Monthly Returns (%)")
return ax
def plot_annual_returns(returns, ax=None, **kwargs):
"""
Plots a bar graph of returns by year.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
x_axis_formatter = FuncFormatter(utils.percentage)
ax.xaxis.set_major_formatter(FuncFormatter(x_axis_formatter))
ax.tick_params(axis='x', which='major', labelsize=10)
ann_ret_df = pd.DataFrame(
timeseries.aggregate_returns(
returns,
'yearly'))
ax.axvline(
100 *
ann_ret_df.values.mean(),
color='steelblue',
linestyle='--',
lw=4,
alpha=0.7)
(100 * ann_ret_df.sort_index(ascending=False)
).plot(ax=ax, kind='barh', alpha=0.70, **kwargs)
ax.axvline(0.0, color='black', linestyle='-', lw=3)
ax.set_ylabel('Year')
ax.set_xlabel('Returns')
ax.set_title("Annual Returns")
ax.legend(['mean'])
return ax
def plot_monthly_returns_dist(returns, ax=None, **kwargs):
"""
Plots a distribution of monthly returns.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
x_axis_formatter = FuncFormatter(utils.percentage)
ax.xaxis.set_major_formatter(FuncFormatter(x_axis_formatter))
ax.tick_params(axis='x', which='major', labelsize=10)
monthly_ret_table = timeseries.aggregate_returns(returns, 'monthly')
ax.hist(
100 * monthly_ret_table,
color='orangered',
alpha=0.80,
bins=20,
**kwargs)
ax.axvline(
100 * monthly_ret_table.mean(),
color='gold',
linestyle='--',
lw=4,
alpha=1.0)
ax.axvline(0.0, color='black', linestyle='-', lw=3, alpha=0.75)
ax.legend(['mean'])
ax.set_ylabel('Number of months')
ax.set_xlabel('Returns')
ax.set_title("Distribution of Monthly Returns")
return ax
def plot_holdings(returns, positions, legend_loc='best', ax=None, **kwargs):
"""Plots total amount of stocks with an active position, either short
or long.
Displays daily total, daily average per month, and all-time daily
average.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
positions : pd.DataFrame, optional
Daily net position values.
- See full explanation in tears.create_full_tear_sheet.
legend_loc : matplotlib.loc, optional
The location of the legend on the plot.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
positions = positions.copy().drop('cash', axis='columns')
df_holdings = positions.apply(lambda x: np.sum(x != 0), axis='columns')
df_holdings_by_month = df_holdings.resample('1M', how='mean')
df_holdings.plot(color='steelblue', alpha=0.6, lw=0.5, ax=ax, **kwargs)
df_holdings_by_month.plot(
color='orangered',
alpha=0.5,
lw=2,
ax=ax,
**kwargs)
ax.axhline(
df_holdings.values.mean(),
color='steelblue',
ls='--',
lw=3,
alpha=1.0)
ax.set_xlim((returns.index[0], returns.index[-1]))
ax.legend(['Daily holdings',
'Average daily holdings, by month',
'Average daily holdings, net'],
loc=legend_loc)
ax.set_title('Holdings per Day')
ax.set_ylabel('Amount of holdings per day')
ax.set_xlabel('')
return ax
def plot_drawdown_periods(returns, top=10, ax=None, **kwargs):
"""
Plots cumulative returns highlighting top drawdown periods.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
top : int, optional
Amount of top drawdowns periods to plot (default 10).
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
y_axis_formatter = FuncFormatter(utils.one_dec_places)
ax.yaxis.set_major_formatter(FuncFormatter(y_axis_formatter))
df_cum_rets = timeseries.cum_returns(returns, starting_value=1.0)
df_drawdowns = timeseries.gen_drawdown_table(returns, top=top)
df_cum_rets.plot(ax=ax, **kwargs)
lim = ax.get_ylim()
colors = sns.cubehelix_palette(len(df_drawdowns))[::-1]
for i, (peak, recovery) in df_drawdowns[
['peak date', 'recovery date']].iterrows():
if pd.isnull(recovery):
# %%
import pandas as pd
import numpy as np
# %%
data = pd.read_csv("All Menu (Various Versions)/국방부메뉴_v2.0.csv", index_col=0)
data
# %%
# The nutrient values will be recomputed from the reference calories ('기준열량'), so the reference-amount column ('기준양') is dropped.
data = data.drop(columns=['기준양'])
data
# %%
# Drop the test row.
data = data.drop(1412)
data = data.reset_index(drop=True)
data
# %%
# For menus with no calorie value, fall back to the reference calories.
for index, row in data.iterrows():
if row['열량'] is np.NAN:
data['열량'][index] = row['기준열량']
data
# %%
# Convert the calorie column ('열량') to an integer type as well.
for index, row in data.iterrows():
cal = str(row['열량'])
if cal[-4:] == 'kcal': cal = cal[:-4]
if cal[0] == '*': cal = cal[2:]
if cal == '0': cal = row['기준열량']
cal = cal.replace(',', '')
data['열량'][index] = int(round(float(cal)))
data['열량'] = pd.to_numeric(data['열량'])
data
# %%
# Convert the reference-calorie ('기준열량'), carbohydrate, fat, protein, sodium and cholesterol columns to integer-like numeric types.
for index, row in data.iterrows():
data['기준열량'][index] = row['기준열량'].replace(',', '')
data['탄수화물'][index] = row['탄수화물'].replace('-', '0')
data['지방'][index] = row['지방'].replace('-', '0')
data['단백질'][index] = row['단백질'].replace('-', '0')
data['나트륨'][index] = row['나트륨'].replace('-', '0')
data['나트륨'][index] = row['나트륨'].replace(',', '')
data['콜레스트롤'][index] = row['콜레스트롤'].replace('-', '0')
data['콜레스트롤'][index] = row['콜레스트롤'].replace(',', '')
if row['나트륨'] == '--': data['나트륨'][index] = '0'
if row['콜레스트롤'] == '--': data['콜레스트롤'][index] = '0'
data['기준열량'] = pd.to_numeric(data['기준열량'])
data['탄수화물'] = pd.to_numeric(data['탄수화물'])
data['지방'] = pd.to_numeric(data['지방'])
data['단백질'] = pd.to_numeric(data['단백질'])
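# %%
# (added) A vectorized sketch of the same cleanup, shown for reference only; it
# assumes the same column names. Item-by-item assignment as above relies on
# chained indexing (SettingWithCopyWarning) and is far slower than str.replace.
def _clean_numeric_columns(df):
    out = df.copy()
    for col in ['기준열량', '탄수화물', '지방', '단백질', '나트륨', '콜레스트롤']:
        out[col] = (out[col].astype(str)
                            .str.replace(',', '', regex=False)
                            .str.replace('-', '0', regex=False))
        out[col] = pd.to_numeric(out[col])
    return out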
import pandas as pd
import numpy as np
import math
import os
from scipy.interpolate import interp1d
import time
from sklearn.ensemble import RandomForestRegressor
import xgboost as xgb
from lightgbm import LGBMRegressor
from catboost import CatBoostRegressor
from information_measures import *
from joblib import Parallel, delayed
#from arch import arch_model
def rmspe(y_true, y_pred):
return (np.sqrt(np.mean(np.square((y_true - y_pred) / y_true))))
def log_return(list_stock_prices): # Stock prices are estimated through wap values
return np.log(list_stock_prices).diff()
def realized_volatility(series_log_return):
return np.sqrt(np.sum(series_log_return**2))
def compute_wap(book_pd):
wap = (book_pd['bid_price1'] * book_pd['ask_size1'] + book_pd['ask_price1'] * book_pd['bid_size1']) / (book_pd['bid_size1']+ book_pd['ask_size1'])
return wap
def realized_volatility_from_book_pd(book_stock_time):
wap = compute_wap(book_stock_time)
returns = log_return(wap)
volatility = realized_volatility(returns)
return volatility
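# Illustrative sketch (toy order-book rows, not competition data): shows the
# wap -> log-return -> realized-volatility pipeline end to end.
def _demo_realized_volatility_from_book():
    toy_book = pd.DataFrame({
        'bid_price1': [99.90, 100.00, 100.05],
        'ask_price1': [100.10, 100.20, 100.15],
        'bid_size1': [10, 12, 9],
        'ask_size1': [11, 9, 14],
    })
    # Returns a single realized-volatility number for the toy window.
    return realized_volatility_from_book_pd(toy_book)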
def realized_volatility_per_time_id(file_path, prediction_column_name):
df_book_data = pd.read_parquet(file_path)
# Estimate stock price per time point
df_book_data['wap'] = compute_wap(df_book_data)
# Compute log return from wap values per time_id
df_book_data['log_return'] = df_book_data.groupby(['time_id'])['wap'].apply(log_return)
df_book_data = df_book_data[~df_book_data['log_return'].isnull()]
# Compute the square root of the sum of log return squared to get realized volatility
df_realized_vol_per_stock = pd.DataFrame(df_book_data.groupby(['time_id'])['log_return'].agg(realized_volatility)).reset_index()
# Formatting
df_realized_vol_per_stock = df_realized_vol_per_stock.rename(columns = {'log_return':prediction_column_name})
stock_id = file_path.split('=')[1]
df_realized_vol_per_stock['row_id'] = df_realized_vol_per_stock['time_id'].apply(lambda x:f'{stock_id}-{x}')
return df_realized_vol_per_stock[['row_id',prediction_column_name]]
def past_realized_volatility_per_stock(list_file,prediction_column_name):
df_past_realized = pd.DataFrame()
for file in list_file:
df_past_realized = pd.concat([df_past_realized,
realized_volatility_per_time_id(file,prediction_column_name)])
return df_past_realized
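# Illustrative usage sketch: `book_train_paths` is a hypothetical list of the
# per-stock parquet partitions; the column name 'pred' matches how the result
# is consumed by stupidForestPrediction below.
# import glob
# book_train_paths = glob.glob('book_train.parquet/stock_id=*')
# naive_preds = past_realized_volatility_per_stock(book_train_paths, 'pred')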
def stupidForestPrediction(book_path_train,prediction_column_name,train_targets_pd,book_path_test):
naive_predictions_train = past_realized_volatility_per_stock(list_file=book_path_train,prediction_column_name=prediction_column_name)
df_joined_train = train_targets_pd.merge(naive_predictions_train[['row_id','pred']], on = ['row_id'], how = 'left')
X = np.array(df_joined_train['pred']).reshape(-1,1)
y = np.array(df_joined_train['target']).reshape(-1,)
regr = RandomForestRegressor(random_state=0)
regr.fit(X, y)
naive_predictions_test = past_realized_volatility_per_stock(list_file=book_path_test,prediction_column_name='target')
yhat = regr.predict(np.array(naive_predictions_test['target']).reshape(-1,1))
updated_predictions = naive_predictions_test.copy()
updated_predictions['target'] = yhat
return updated_predictions
def garch_fit_predict_volatility(returns_series, N=10000):
model = arch_model(returns_series * N, p=1, q=1)
model_fit = model.fit(update_freq=0, disp='off')
yhat = model_fit.forecast(horizon=600, reindex=False)
pred_volatility = np.sqrt(np.sum(yhat.variance.values)) / N
return pred_volatility
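# Note: garch_fit_predict_volatility relies on `arch_model` from the `arch`
# package, whose import is commented out at the top of this file; re-enable it
# before calling this function.
# Illustrative sketch with hypothetical toy returns:
# toy_returns = pd.Series(np.random.normal(0, 1e-4, size=500))
# vol_forecast = garch_fit_predict_volatility(toy_returns)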
def garch_volatility_per_time_id(file_path, prediction_column_name):
# read the data
df_book_data = pd.read_parquet(file_path)
# calculate the midprice (not the WAP)
df_book_data['midprice'] =(df_book_data['bid_price1'] + df_book_data['ask_price1'])/2
# leave only WAP for now
df_book_data = df_book_data[['time_id', 'seconds_in_bucket', 'midprice']]
df_book_data = df_book_data.sort_values('seconds_in_bucket')
# make the book updates evenly spaced
df_book_data_evenly = pd.DataFrame({'time_id':np.repeat(df_book_data['time_id'].unique(), 600),
'second':np.tile(range(0,600), df_book_data['time_id'].nunique())})
df_book_data_evenly['second'] = df_book_data_evenly['second'].astype(np.int16)
df_book_data_evenly = df_book_data_evenly.sort_values('second')
df_book_data_evenly = pd.merge_asof(df_book_data_evenly,
df_book_data,
left_on='second',right_on='seconds_in_bucket',
by = 'time_id')
# Ordering for easier use
df_book_data_evenly = df_book_data_evenly[['time_id', 'second', 'midprice']]
df_book_data_evenly = df_book_data_evenly.sort_values(['time_id','second']).reset_index(drop=True)
# calculate log returns
df_book_data_evenly['log_return'] = df_book_data_evenly.groupby(['time_id'])['midprice'].apply(log_return)
df_book_data_evenly = df_book_data_evenly[~df_book_data_evenly['log_return'].isnull()]
# fit GARCH(1, 1) and predict the volatility of returns
df_garch_vol_per_stock = \
pd.DataFrame(df_book_data_evenly.groupby(['time_id'])['log_return'].agg(garch_fit_predict_volatility)).reset_index()
df_garch_vol_per_stock = df_garch_vol_per_stock.rename(columns = {'log_return':prediction_column_name})
# add row_id column to the data
stock_id = file_path.split('=')[1]
df_garch_vol_per_stock['row_id'] = df_garch_vol_per_stock['time_id'].apply(lambda x:f'{stock_id}-{x}')
# return the result
return df_garch_vol_per_stock[['row_id', prediction_column_name]]
def garch_volatility_per_stock(list_file, prediction_column_name):
df_garch_predicted = pd.DataFrame()
for file in list_file:
df_garch_predicted = pd.concat([df_garch_predicted,
garch_volatility_per_time_id(file, prediction_column_name)])
return df_garch_predicted
def entropy_from_book(book_stock_time,last_min):
if last_min < 10:
book_stock_time = book_stock_time[book_stock_time['seconds_in_bucket'] >= (600-last_min*60)]
if book_stock_time.empty == True or book_stock_time.shape[0] < 3:
return 0
wap = compute_wap(book_stock_time)
t_init = book_stock_time['seconds_in_bucket']
t_new = np.arange(np.min(t_init),np.max(t_init))
# Closest neighbour interpolation (no changes in wap between lines)
nearest = interp1d(t_init, wap, kind='nearest')
resampled_wap = nearest(t_new)
# Compute sample entropy
# sampleEntropy = nolds.sampen(resampled_wap)
sampleEntropy = sampen(resampled_wap)
return sampleEntropy
def entropy_from_wap(wap,seconds,last_seconds):
if last_seconds < 600:
idx = np.where(seconds >= last_seconds)[0]
if len(idx) < 3:
return 0
else:
wap = wap[idx]
seconds = seconds[idx]
# Closest neighbour interpolation (no changes in wap between lines)
t_new = np.arange(np.min(seconds),np.max(seconds))
nearest = interp1d(seconds, wap, kind='nearest')
resampled_wap = nearest(t_new)
# Compute sample entropy
# sampleEntropy = nolds.sampen(resampled_wap)
sampleEntropy = sampen(resampled_wap)
# sampleEntropy = ApEn_new(resampled_wap,3,0.001)
return sampleEntropy
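# Illustrative sketch (assumed toy arrays, not competition data): resample an
# irregular wap series onto a 1-second grid and score it with sampen, which is
# imported from information_measures above.
# seconds = np.array([0, 3, 7, 12, 20, 35, 60])
# wap_vals = 1 + np.cumsum(np.random.normal(0, 1e-4, size=seconds.shape[0]))
# ent_full = entropy_from_wap(wap_vals, seconds, last_seconds=600)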
def linearFit(book_stock_time, last_min):
if last_min < 10:
book_stock_time = book_stock_time[book_stock_time['seconds_in_bucket'] >= (600-last_min*60)]
if book_stock_time.empty == True or book_stock_time.shape[0] < 2:
return 0
wap = np.array(compute_wap(book_stock_time))
t_init = book_stock_time['seconds_in_bucket']
return (wap[-1] - wap[0])/(np.max(t_init) - np.min(t_init))
def wapStat(book_stock_time, last_min):
if last_min < 10:
book_stock_time = book_stock_time[book_stock_time['seconds_in_bucket'] >= (600-last_min*60)]
if book_stock_time.empty == True or book_stock_time.shape[0] < 2:
return 0
wap = compute_wap(book_stock_time)
t_init = book_stock_time['seconds_in_bucket']
t_new = np.arange(np.min(t_init),np.max(t_init))
# Closest neighbour interpolation (no changes in wap between lines)
nearest = interp1d(t_init, wap, kind='nearest')
resampled_wap = nearest(t_new)
return np.std(resampled_wap)
def entropy_Prediction(book_path_train,prediction_column_name,train_targets_pd,book_path_test,all_stocks_ids,test_file):
# Compute features
book_features_encoded_test = computeFeatures_1(book_path_test,'test',test_file,all_stocks_ids)
book_features_encoded_train = computeFeatures_1(book_path_train,'train',train_targets_pd,all_stocks_ids)
X = book_features_encoded_train.drop(['row_id','target','stock_id'],axis=1)
y = book_features_encoded_train['target']
# Modeling
catboost_default = CatBoostRegressor(verbose=0)
catboost_default.fit(X,y)
# Predict
X_test = book_features_encoded_test.drop(['row_id','stock_id'],axis=1)
yhat = catboost_default.predict(X_test)
# Formatting
yhat_pd = pd.DataFrame(yhat,columns=['target'])
predictions = pd.concat([test_file,yhat_pd],axis=1)
return predictions
def computeFeatures_1(book_path,prediction_column_name,train_targets_pd,all_stocks_ids):
book_all_features = pd.DataFrame()
encoder = np.eye(len(all_stocks_ids))
stocks_id_list, row_id_list = [], []
volatility_list, entropy2_list = [], []
linearFit_list, linearFit5_list, linearFit2_list = [], [], []
wap_std_list, wap_std5_list, wap_std2_list = [], [], []
for file in book_path:
start = time.time()
book_stock = pd.read_parquet(file)
stock_id = file.split('=')[1]
print('stock id computing = ' + str(stock_id))
stock_time_ids = book_stock['time_id'].unique()
for time_id in stock_time_ids:
# Access book data at this time + stock
book_stock_time = book_stock[book_stock['time_id'] == time_id]
# Create feature matrix
stocks_id_list.append(stock_id)
row_id_list.append(str(f'{stock_id}-{time_id}'))
volatility_list.append(realized_volatility_from_book_pd(book_stock_time=book_stock_time))
entropy2_list.append(entropy_from_book(book_stock_time=book_stock_time,last_min=2))
linearFit_list.append(linearFit(book_stock_time=book_stock_time,last_min=10))
linearFit5_list.append(linearFit(book_stock_time=book_stock_time,last_min=5))
linearFit2_list.append(linearFit(book_stock_time=book_stock_time,last_min=2))
wap_std_list.append(wapStat(book_stock_time=book_stock_time,last_min=10))
wap_std5_list.append(wapStat(book_stock_time=book_stock_time,last_min=5))
wap_std2_list.append(wapStat(book_stock_time=book_stock_time,last_min=2))
print('Computing one stock entropy took', time.time() - start, 'seconds for stock ', stock_id)
# Merge targets
stocks_id_pd = pd.DataFrame(stocks_id_list,columns=['stock_id'])
row_id_pd = pd.DataFrame(row_id_list,columns=['row_id'])
volatility_pd = pd.DataFrame(volatility_list,columns=['volatility'])
entropy2_pd = pd.DataFrame(entropy2_list,columns=['entropy2'])
linearFit_pd = pd.DataFrame(linearFit_list,columns=['linearFit_coef'])
linearFit5_pd = pd.DataFrame(linearFit5_list,columns=['linearFit_coef5'])
linearFit2_pd = pd.DataFrame(linearFit2_list,columns=['linearFit_coef2'])
wap_std_pd = pd.DataFrame(wap_std_list,columns=['wap_std'])
wap_std5_pd = pd.DataFrame(wap_std5_list,columns=['wap_std5'])
wap_std2_pd = pd.DataFrame(wap_std2_list,columns=['wap_std2'])
book_all_features = pd.concat([stocks_id_pd,row_id_pd,volatility_pd,entropy2_pd,linearFit_pd,linearFit5_pd,linearFit2_pd,
wap_std_pd,wap_std5_pd,wap_std2_pd],axis=1)
# This line makes sure the predictions are aligned with the row_id in the submission file
book_all_features = train_targets_pd.merge(book_all_features, on = ['row_id'])
# Add encoded stock
encoded = list()
for i in range(book_all_features.shape[0]):
stock_id = book_all_features['stock_id'][i]
encoded_stock = encoder[np.where(all_stocks_ids == int(stock_id))[0],:]
encoded.append(encoded_stock)
encoded_pd = pd.DataFrame(np.array(encoded).reshape(book_all_features.shape[0],np.array(all_stocks_ids).shape[0]))
book_all_features_encoded = pd.concat([book_all_features, encoded_pd],axis=1)
return book_all_features_encoded
def calc_wap(df):
return (df['bid_price1'] * df['ask_size1'] + df['ask_price1'] * df['bid_size1']) / (df['bid_size1'] + df['ask_size1'])
def calc_wap2(df):
return (df['bid_price2'] * df['ask_size2'] + df['ask_price2'] * df['bid_size2']) / (df['bid_size2'] + df['ask_size2'])
def calc_wap3(df):
return (df['bid_price2'] * df['bid_size2'] + df['ask_price2'] * df['ask_size2']) / (df['bid_size2'] + df['ask_size2'])
def calc_wap4(df):
return (df['bid_price1'] * df['bid_size1'] + df['ask_price1'] * df['ask_size1']) / (df['bid_size1'] + df['ask_size1'])
def mid_price(df):
return df['bid_price1'] /2 + df['ask_price1'] / 2
def calc_rv_from_wap_numba(values, index):
log_return = np.diff(np.log(values))
realized_vol = np.sqrt(np.sum(np.square(log_return[1:])))
return realized_vol
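# Illustrative usage sketch: with engine='numba' the aggregating function must
# accept (values, index), which is why calc_rv_from_wap_numba takes both.
# `book_df` below is a hypothetical book DataFrame that already has a 'wap' column.
# rv_per_time_id = (book_df.groupby('time_id')['wap']
#                   .agg(calc_rv_from_wap_numba, engine='numba'))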
def load_book_data_by_id(stock_id,datapath,train_test):
file_to_read = os.path.join(datapath,'book_' + str(train_test) + str('.parquet'),'stock_id=' + str(stock_id))
df = pd.read_parquet(file_to_read)
return df
def load_trades_data_by_id(stock_id,datapath,train_test):
file_to_read = os.path.join(datapath,'trade_' + str(train_test) + str('.parquet'),'stock_id=' + str(stock_id))
df = pd.read_parquet(file_to_read)
return df
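# Illustrative usage sketch: `datapath` is a hypothetical local copy of the
# competition data laid out as <datapath>/book_train.parquet/stock_id=<id>.
# book_0 = load_book_data_by_id(0, '/data/optiver', 'train')
# trades_0 = load_trades_data_by_id(0, '/data/optiver', 'train')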
def entropy_from_df(df):
if df.shape[0] < 3:
return 0
t_init = df['seconds_in_bucket']
t_new = np.arange(np.min(t_init),np.max(t_init))
# Closest neighbour interpolation (no changes in wap between lines)
nearest = interp1d(t_init, df['wap'], kind='nearest')
resampled_wap = nearest(t_new)
# Compute sample entropy
# sampleEntropy = nolds.sampen(resampled_wap)
sampleEntropy = sampen(resampled_wap)
return sampleEntropy
def entropy_from_df2(df):
if df.shape[0] < 3:
return 0
t_init = df['seconds_in_bucket']
t_new = np.arange(np.min(t_init),np.max(t_init))
# Closest neighbour interpolation (no changes in wap between lines)
nearest = interp1d(t_init, df['wap2'], kind='nearest')
resampled_wap = nearest(t_new)
# Compute sample entropy
# sampleEntropy = nolds.sampen(resampled_wap)
sampleEntropy = sampen(resampled_wap)
return sampleEntropy
def entropy_from_df3(df):
if df.shape[0] < 3:
return 0
t_init = df['seconds_in_bucket']
t_new = np.arange(np.min(t_init),np.max(t_init))
# Closest neighbour interpolation (no changes in wap between lines)
nearest = interp1d(t_init, df['wap3'], kind='nearest')
resampled_wap = nearest(t_new)
# Compute sample entropy
sampleEntropy = sampen(resampled_wap)
return sampleEntropy
def financial_metrics(df):
wap_imbalance = np.mean(df['wap'] - df['wap2'])
price_spread = np.mean((df['ask_price1'] - df['bid_price1']) / ((df['ask_price1'] + df['bid_price1'])/2))
bid_spread = np.mean(df['bid_price1'] - df['bid_price2'])
    ask_spread = np.mean(df['ask_price1'] - df['ask_price2'])  # usually negative (best ask below 2nd level); abs() may be preferable
total_volume = np.mean((df['ask_size1'] + df['ask_size2']) + (df['bid_size1'] + df['bid_size2']))
volume_imbalance = np.mean(abs((df['ask_size1'] + df['ask_size2']) - (df['bid_size1'] + df['bid_size2'])))
return [wap_imbalance,price_spread,bid_spread,ask_spread,total_volume,volume_imbalance]
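# Illustrative sketch of how the list returned by financial_metrics is
# unpacked into named columns after a groupby-apply, mirroring the pattern
# used in the computeFeatures_* functions below; `book_df` is hypothetical
# and must already contain 'wap' and 'wap2' columns.
# feats = book_df.groupby('time_id').apply(financial_metrics).to_frame('embedding')
# feats[['wap_imbalance', 'price_spread', 'bid_spread',
#        'ask_spread', 'total_vol', 'vol_imbalance']] = pd.DataFrame(
#     feats['embedding'].tolist(), index=feats.index)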
def financial_metrics_2(df):
wap_imbalance = df['wap'] - df['wap2']
price_spread = (df['ask_price1'] - df['bid_price1']) / ((df['ask_price1'] + df['bid_price1'])/2)
bid_spread = df['bid_price1'] - df['bid_price2']
    ask_spread = df['ask_price1'] - df['ask_price2']  # usually negative (best ask below 2nd level); abs() may be preferable
total_volume = (df['ask_size1'] + df['ask_size2']) + (df['bid_size1'] + df['bid_size2'])
volume_imbalance = abs((df['ask_size1'] + df['ask_size2']) - (df['bid_size1'] + df['bid_size2']))
# New features here
wap_imbalance_mean = np.mean(wap_imbalance)
wap_imbalance_sum = np.sum(wap_imbalance)
wap_imbalance_std = np.std(wap_imbalance)
wap_imbalance_max = np.max(wap_imbalance)
wap_imbalance_min = np.min(wap_imbalance)
price_spread_mean = np.mean(price_spread)
price_spread_sum = np.sum(price_spread)
price_spread_std = np.std(price_spread)
price_spread_max = np.max(price_spread)
price_spread_min = np.min(price_spread)
bid_spread_mean = np.mean(bid_spread)
bid_spread_sum = np.sum(bid_spread)
bid_spread_std = np.std(bid_spread)
bid_spread_max = np.max(bid_spread)
bid_spread_min = np.min(bid_spread)
ask_spread_mean = np.mean(ask_spread)
ask_spread_sum = np.sum(ask_spread)
ask_spread_std = np.std(ask_spread)
ask_spread_max = np.max(ask_spread)
ask_spread_min = np.min(ask_spread)
total_volume_mean = np.mean(total_volume)
total_volume_sum = np.sum(total_volume)
total_volume_std = np.std(total_volume)
total_volume_max = np.max(total_volume)
total_volume_min = np.min(total_volume)
volume_imbalance_mean = np.mean(volume_imbalance)
volume_imbalance_sum = np.sum(volume_imbalance)
volume_imbalance_std = np.std(volume_imbalance)
volume_imbalance_max = np.max(volume_imbalance)
volume_imbalance_min = np.min(volume_imbalance)
    return [wap_imbalance_mean, price_spread_mean, bid_spread_mean, ask_spread_mean, total_volume_mean, volume_imbalance_mean,
            wap_imbalance_sum, price_spread_sum, bid_spread_sum, ask_spread_sum, total_volume_sum, volume_imbalance_sum,
            wap_imbalance_std, price_spread_std, bid_spread_std, ask_spread_std, total_volume_std, volume_imbalance_std,
            wap_imbalance_max, price_spread_max, bid_spread_max, ask_spread_max, total_volume_max, volume_imbalance_max,
            wap_imbalance_min, price_spread_min, bid_spread_min, ask_spread_min, total_volume_min, volume_imbalance_min]
def other_metrics(df):
if df.shape[0] < 2:
linearFit = 0
linearFit2 = 0
linearFit3 = 0
std_1 = 0
std_2 = 0
std_3 = 0
else:
linearFit = (df['wap'].iloc[-1] - df['wap'].iloc[0]) / ((np.max(df['seconds_in_bucket']) - np.min(df['seconds_in_bucket'])))
linearFit2 = (df['wap2'].iloc[-1] - df['wap2'].iloc[0]) / ((np.max(df['seconds_in_bucket']) - np.min(df['seconds_in_bucket'])))
linearFit3 = (df['wap3'].iloc[-1] - df['wap3'].iloc[0]) / ((np.max(df['seconds_in_bucket']) - np.min(df['seconds_in_bucket'])))
# Resampling
t_init = df['seconds_in_bucket']
t_new = np.arange(np.min(t_init),np.max(t_init))
# Closest neighbour interpolation (no changes in wap between lines)
nearest = interp1d(t_init, df['wap'], kind='nearest')
nearest2 = interp1d(t_init, df['wap2'], kind='nearest')
nearest3 = interp1d(t_init, df['wap3'], kind='nearest')
std_1 = np.std(nearest(t_new))
std_2 = np.std(nearest2(t_new))
std_3 = np.std(nearest3(t_new))
return [linearFit, linearFit2, linearFit3, std_1, std_2, std_3]
def load_book_data_by_id_kaggle(stock_id,train_test):
df = pd.read_parquet(f'../input/optiver-realized-volatility-prediction/book_{train_test}.parquet/stock_id={stock_id}')
return df
def load_trades_data_by_id_kaggle(stock_id,train_test):
df = pd.read_parquet(f'../input/optiver-realized-volatility-prediction/trade_{train_test}.parquet/stock_id={stock_id}')
return df
def computeFeatures_wEntropy(machine, dataset, all_stocks_ids, datapath):
list_rv, list_rv2, list_rv3 = [], [], []
list_ent, list_fin, list_fin2 = [], [], []
list_others, list_others2, list_others3 = [], [], []
for stock_id in range(127):
start = time.time()
if machine == 'local':
try:
book_stock = load_book_data_by_id(stock_id,datapath,dataset)
except:
continue
elif machine == 'kaggle':
try:
book_stock = load_book_data_by_id_kaggle(stock_id,dataset)
except:
continue
# Useful
all_time_ids_byStock = book_stock['time_id'].unique()
# Calculate wap for the book
book_stock['wap'] = calc_wap(book_stock)
book_stock['wap2'] = calc_wap2(book_stock)
book_stock['wap3'] = calc_wap3(book_stock)
# Calculate realized volatility
df_sub = book_stock.groupby('time_id')['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2 = book_stock.groupby('time_id')['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3 = book_stock.groupby('time_id')['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub['time_id']]
df_sub = pd.concat([df_sub,df_sub2['wap2'],df_sub3['wap3']],axis=1)
df_sub = df_sub.rename(columns={'time_id':'row_id','wap': 'rv', 'wap2': 'rv2', 'wap3': 'rv3'})
# Calculate realized volatility last 5 min
isEmpty = book_stock.query(f'seconds_in_bucket >= 300').empty
if isEmpty == False:
df_sub_5 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id'])['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2_5 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id'])['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3_5 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id'])['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub_5['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_5['time_id']]
df_sub_5 = pd.concat([df_sub_5,df_sub2_5['wap2'],df_sub3_5['wap3']],axis=1)
df_sub_5 = df_sub_5.rename(columns={'time_id':'row_id','wap': 'rv_5', 'wap2': 'rv2_5', 'wap3': 'rv3_5'})
else: # 0 volatility
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
            # Column vectors of zeros (one row per time_id) so the shape matches the single column name.
            zero_rv = pd.DataFrame(np.zeros((times_pd.shape[0], 1)), columns=['rv_5'])
            zero_rv2 = pd.DataFrame(np.zeros((times_pd.shape[0], 1)), columns=['rv2_5'])
            zero_rv3 = pd.DataFrame(np.zeros((times_pd.shape[0], 1)), columns=['rv3_5'])
df_sub_5 = pd.concat([times_pd,zero_rv,zero_rv2,zero_rv3],axis=1)
# Calculate realized volatility last 2 min
isEmpty = book_stock.query(f'seconds_in_bucket >= 480').empty
if isEmpty == False:
df_sub_2 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id'])['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2_2 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id'])['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3_2 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id'])['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub_2['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_2['time_id']]
df_sub_2 = pd.concat([df_sub_2,df_sub2_2['wap2'],df_sub3_2['wap3']],axis=1)
df_sub_2 = df_sub_2.rename(columns={'time_id':'row_id','wap': 'rv_2', 'wap2': 'rv2_2', 'wap3': 'rv3_2'})
else: # 0 volatility
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
            zero_rv = pd.DataFrame(np.zeros((times_pd.shape[0], 1)), columns=['rv_2'])
            zero_rv2 = pd.DataFrame(np.zeros((times_pd.shape[0], 1)), columns=['rv2_2'])
            zero_rv3 = pd.DataFrame(np.zeros((times_pd.shape[0], 1)), columns=['rv3_2'])
df_sub_2 = pd.concat([times_pd,zero_rv,zero_rv2,zero_rv3],axis=1)
list_rv.append(df_sub)
list_rv2.append(df_sub_5)
list_rv3.append(df_sub_2)
# Calculate other financial metrics from book
df_sub_book_feats = book_stock.groupby(['time_id']).apply(financial_metrics).to_frame().reset_index()
df_sub_book_feats = df_sub_book_feats.rename(columns={0:'embedding'})
df_sub_book_feats[['wap_imbalance','price_spread','bid_spread','ask_spread','total_vol','vol_imbalance']] = pd.DataFrame(df_sub_book_feats.embedding.tolist(), index=df_sub_book_feats.index)
df_sub_book_feats['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_book_feats['time_id']]
df_sub_book_feats = df_sub_book_feats.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
isEmpty = book_stock.query(f'seconds_in_bucket >= 300').empty
if isEmpty == False:
df_sub_book_feats5 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id']).apply(financial_metrics).to_frame().reset_index()
df_sub_book_feats5 = df_sub_book_feats5.rename(columns={0:'embedding'})
df_sub_book_feats5[['wap_imbalance5','price_spread5','bid_spread5','ask_spread5','total_vol5','vol_imbalance5']] = pd.DataFrame(df_sub_book_feats5.embedding.tolist(), index=df_sub_book_feats5.index)
df_sub_book_feats5['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_book_feats5['time_id']]
df_sub_book_feats5 = df_sub_book_feats5.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
else:
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
temp = pd.DataFrame([0],columns=['wap_imbalance5'])
temp2 = pd.DataFrame([0],columns=['price_spread5'])
temp3 = pd.DataFrame([0],columns=['bid_spread5'])
temp4 = pd.DataFrame([0],columns=['ask_spread5'])
temp5 = pd.DataFrame([0],columns=['total_vol5'])
temp6 = pd.DataFrame([0],columns=['vol_imbalance5'])
df_sub_book_feats5 = pd.concat([times_pd,temp,temp2,temp3,temp4,temp5,temp6],axis=1)
list_fin.append(df_sub_book_feats)
list_fin2.append(df_sub_book_feats5)
# Compute entropy
isEmpty = book_stock.query(f'seconds_in_bucket >= 480').empty
if isEmpty == False:
df_ent = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id']).apply(entropy_from_df).to_frame().reset_index().fillna(0)
df_ent2 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id']).apply(entropy_from_df2).to_frame().reset_index().fillna(0)
df_ent3 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id']).apply(entropy_from_df3).to_frame().reset_index().fillna(0)
df_ent['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_ent['time_id']]
df_ent = df_ent.rename(columns={'time_id':'row_id',0:'entropy'})
df_ent2 = df_ent2.rename(columns={0:'entropy2'}).drop(['time_id'],axis=1)
df_ent3 = df_ent3.rename(columns={0:'entropy3'}).drop(['time_id'],axis=1)
df_ent = pd.concat([df_ent,df_ent2,df_ent3],axis=1)
else:
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
temp = pd.DataFrame([0],columns=['entropy'])
temp2 = pd.DataFrame([0],columns=['entropy2'])
temp3 = pd.DataFrame([0],columns=['entropy3'])
df_ent = pd.concat([times_pd,temp,temp2,temp3],axis=1)
list_ent.append(df_ent)
# Compute other metrics
df_others = book_stock.groupby(['time_id']).apply(other_metrics).to_frame().reset_index().fillna(0)
df_others = df_others.rename(columns={0:'embedding'})
df_others[['linearFit1_1','linearFit1_2','linearFit1_3','wap_std1_1','wap_std1_2','wap_std1_3']] = pd.DataFrame(df_others.embedding.tolist(), index=df_others.index)
df_others['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_others['time_id']]
df_others = df_others.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
list_others.append(df_others)
isEmpty = book_stock.query(f'seconds_in_bucket >= 300').empty
if isEmpty == False:
df_others2 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id']).apply(other_metrics).to_frame().reset_index().fillna(0)
df_others2 = df_others2.rename(columns={0:'embedding'})
df_others2[['linearFit2_1','linearFit2_2','linearFit2_3','wap_std2_1','wap_std2_2','wap_std2_3']] = pd.DataFrame(df_others2.embedding.tolist(), index=df_others2.index)
df_others2['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_others2['time_id']]
df_others2 = df_others2.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
else:
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
temp = pd.DataFrame([0],columns=['linearFit2_1'])
temp2 = pd.DataFrame([0],columns=['linearFit2_2'])
temp3 = pd.DataFrame([0],columns=['linearFit2_3'])
temp4 = pd.DataFrame([0],columns=['wap_std2_1'])
temp5 = pd.DataFrame([0],columns=['wap_std2_2'])
temp6 = pd.DataFrame([0],columns=['wap_std2_3'])
df_others2 = pd.concat([times_pd,temp,temp2,temp3,temp4,temp5,temp6],axis=1)
list_others2.append(df_others2)
isEmpty = book_stock.query(f'seconds_in_bucket >= 480').empty
if isEmpty == False:
df_others3 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id']).apply(other_metrics).to_frame().reset_index().fillna(0)
df_others3 = df_others3.rename(columns={0:'embedding'})
df_others3[['linearFit3_1','linearFit3_2','linearFit3_3','wap_std3_1','wap_std3_2','wap_std3_3']] = pd.DataFrame(df_others3.embedding.tolist(), index=df_others3.index)
df_others3['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_others3['time_id']]
df_others3 = df_others3.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
else:
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
temp = pd.DataFrame([0],columns=['linearFit3_1'])
temp2 = pd.DataFrame([0],columns=['linearFit3_2'])
temp3 = pd.DataFrame([0],columns=['linearFit3_3'])
temp4 = pd.DataFrame([0],columns=['wap_std3_1'])
temp5 = pd.DataFrame([0],columns=['wap_std3_2'])
temp6 = pd.DataFrame([0],columns=['wap_std3_3'])
df_others3 = pd.concat([times_pd,temp,temp2,temp3,temp4,temp5,temp6],axis=1)
list_others3.append(df_others3)
print('Computing one stock took', time.time() - start, 'seconds for stock ', stock_id)
# Create features dataframe
df_submission = pd.concat(list_rv)
df_submission2 = pd.concat(list_rv2)
df_submission3 = pd.concat(list_rv3)
df_ent_concat = pd.concat(list_ent)
df_fin_concat = pd.concat(list_fin)
df_fin2_concat = pd.concat(list_fin2)
df_others = pd.concat(list_others)
df_others2 = pd.concat(list_others2)
df_others3 = pd.concat(list_others3)
df_book_features = df_submission.merge(df_submission2, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_submission3, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_ent_concat, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_fin_concat, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_fin2_concat, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_others, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_others2, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_others3, on = ['row_id'], how='left').fillna(0)
# Add encoded stock
encoder = np.eye(len(all_stocks_ids))
encoded = list()
for i in range(df_book_features.shape[0]):
stock_id = int(df_book_features['row_id'][i].split('-')[0])
encoded_stock = encoder[np.where(all_stocks_ids == int(stock_id))[0],:]
encoded.append(encoded_stock)
encoded_pd = pd.DataFrame(np.array(encoded).reshape(df_book_features.shape[0],np.array(all_stocks_ids).shape[0]))
df_book_features_encoded = pd.concat([df_book_features, encoded_pd],axis=1)
return df_book_features_encoded
def computeFeatures_july(machine, dataset, all_stocks_ids, datapath):
list_rv, list_rv2, list_rv3 = [], [], []
list_ent, list_fin, list_fin2 = [], [], []
list_others, list_others2, list_others3 = [], [], []
for stock_id in range(127):
start = time.time()
if machine == 'local':
try:
book_stock = load_book_data_by_id(stock_id,datapath,dataset)
except:
continue
elif machine == 'kaggle':
try:
book_stock = load_book_data_by_id_kaggle(stock_id,dataset)
except:
continue
# Useful
all_time_ids_byStock = book_stock['time_id'].unique()
# Calculate wap for the book
book_stock['wap'] = calc_wap(book_stock)
book_stock['wap2'] = calc_wap2(book_stock)
book_stock['wap3'] = calc_wap3(book_stock)
# Calculate realized volatility
df_sub = book_stock.groupby('time_id')['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2 = book_stock.groupby('time_id')['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3 = book_stock.groupby('time_id')['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub['time_id']]
df_sub = pd.concat([df_sub,df_sub2['wap2'],df_sub3['wap3']],axis=1)
df_sub = df_sub.rename(columns={'time_id':'row_id','wap': 'rv', 'wap2': 'rv2', 'wap3': 'rv3'})
# Calculate realized volatility last 5 min
isEmpty = book_stock.query(f'seconds_in_bucket >= 300').empty
if isEmpty == False:
df_sub_5 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id'])['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2_5 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id'])['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3_5 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id'])['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub_5['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_5['time_id']]
df_sub_5 = pd.concat([df_sub_5,df_sub2_5['wap2'],df_sub3_5['wap3']],axis=1)
df_sub_5 = df_sub_5.rename(columns={'time_id':'row_id','wap': 'rv_5', 'wap2': 'rv2_5', 'wap3': 'rv3_5'})
else: # 0 volatility
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
            zero_rv = pd.DataFrame(np.zeros((times_pd.shape[0], 1)), columns=['rv_5'])
            zero_rv2 = pd.DataFrame(np.zeros((times_pd.shape[0], 1)), columns=['rv2_5'])
            zero_rv3 = pd.DataFrame(np.zeros((times_pd.shape[0], 1)), columns=['rv3_5'])
df_sub_5 = pd.concat([times_pd,zero_rv,zero_rv2,zero_rv3],axis=1)
# Calculate realized volatility last 2 min
isEmpty = book_stock.query(f'seconds_in_bucket >= 480').empty
if isEmpty == False:
df_sub_2 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id'])['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2_2 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id'])['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3_2 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id'])['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub_2['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_2['time_id']]
df_sub_2 = pd.concat([df_sub_2,df_sub2_2['wap2'],df_sub3_2['wap3']],axis=1)
df_sub_2 = df_sub_2.rename(columns={'time_id':'row_id','wap': 'rv_2', 'wap2': 'rv2_2', 'wap3': 'rv3_2'})
else: # 0 volatility
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
            zero_rv = pd.DataFrame(np.zeros((times_pd.shape[0], 1)), columns=['rv_2'])
            zero_rv2 = pd.DataFrame(np.zeros((times_pd.shape[0], 1)), columns=['rv2_2'])
            zero_rv3 = pd.DataFrame(np.zeros((times_pd.shape[0], 1)), columns=['rv3_2'])
df_sub_2 = pd.concat([times_pd,zero_rv,zero_rv2,zero_rv3],axis=1)
list_rv.append(df_sub)
list_rv2.append(df_sub_5)
list_rv3.append(df_sub_2)
# Calculate other financial metrics from book
df_sub_book_feats = book_stock.groupby(['time_id']).apply(financial_metrics_2).to_frame().reset_index()
df_sub_book_feats = df_sub_book_feats.rename(columns={0:'embedding'})
        # financial_metrics_2 returns 30 statistics (mean/sum/std/max/min for each of
        # the six base metrics), so build the matching column names before assigning.
        fin_cols = [f'{base}_{stat}'
                    for stat in ['mean', 'sum', 'std', 'max', 'min']
                    for base in ['wap_imbalance', 'price_spread', 'bid_spread',
                                 'ask_spread', 'total_vol', 'vol_imbalance']]
        df_sub_book_feats[fin_cols] = pd.DataFrame(df_sub_book_feats.embedding.tolist(), index=df_sub_book_feats.index)
df_sub_book_feats['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_book_feats['time_id']]
df_sub_book_feats = df_sub_book_feats.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
isEmpty = book_stock.query(f'seconds_in_bucket >= 300').empty
if isEmpty == False:
df_sub_book_feats5 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id']).apply(financial_metrics_2).to_frame().reset_index()
df_sub_book_feats5 = df_sub_book_feats5.rename(columns={0:'embedding'})
            fin_cols5 = [f'{base}_{stat}'
                         for stat in ['mean', 'sum', 'std', 'max', 'min']
                         for base in ['wap_imbalance5', 'price_spread5', 'bid_spread5',
                                      'ask_spread5', 'total_vol5', 'vol_imbalance5']]
            df_sub_book_feats5[fin_cols5] = pd.DataFrame(df_sub_book_feats5.embedding.tolist(), index=df_sub_book_feats5.index)
df_sub_book_feats5['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_book_feats5['time_id']]
df_sub_book_feats5 = df_sub_book_feats5.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
else:
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
temp = pd.DataFrame([0],columns=['wap_imbalance5'])
temp2 = pd.DataFrame([0],columns=['price_spread5'])
temp3 = pd.DataFrame([0],columns=['bid_spread5'])
temp4 = pd.DataFrame([0],columns=['ask_spread5'])
temp5 = pd.DataFrame([0],columns=['total_vol5'])
temp6 = pd.DataFrame([0],columns=['vol_imbalance5'])
df_sub_book_feats5 = pd.concat([times_pd,temp,temp2,temp3,temp4,temp5,temp6],axis=1)
list_fin.append(df_sub_book_feats)
list_fin2.append(df_sub_book_feats5)
# Compute other metrics
df_others = book_stock.groupby(['time_id']).apply(other_metrics).to_frame().reset_index().fillna(0)
df_others = df_others.rename(columns={0:'embedding'})
df_others[['linearFit1_1','linearFit1_2','linearFit1_3','wap_std1_1','wap_std1_2','wap_std1_3']] = pd.DataFrame(df_others.embedding.tolist(), index=df_others.index)
df_others['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_others['time_id']]
df_others = df_others.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
list_others.append(df_others)
isEmpty = book_stock.query(f'seconds_in_bucket >= 300').empty
if isEmpty == False:
df_others2 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id']).apply(other_metrics).to_frame().reset_index().fillna(0)
df_others2 = df_others2.rename(columns={0:'embedding'})
df_others2[['linearFit2_1','linearFit2_2','linearFit2_3','wap_std2_1','wap_std2_2','wap_std2_3']] = pd.DataFrame(df_others2.embedding.tolist(), index=df_others2.index)
df_others2['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_others2['time_id']]
df_others2 = df_others2.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
else:
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
temp = pd.DataFrame([0],columns=['linearFit2_1'])
temp2 = pd.DataFrame([0],columns=['linearFit2_2'])
temp3 = pd.DataFrame([0],columns=['linearFit2_3'])
temp4 = pd.DataFrame([0],columns=['wap_std2_1'])
temp5 = pd.DataFrame([0],columns=['wap_std2_2'])
temp6 = pd.DataFrame([0],columns=['wap_std2_3'])
df_others2 = pd.concat([times_pd,temp,temp2,temp3,temp4,temp5,temp6],axis=1)
list_others2.append(df_others2)
isEmpty = book_stock.query(f'seconds_in_bucket >= 480').empty
if isEmpty == False:
df_others3 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id']).apply(other_metrics).to_frame().reset_index().fillna(0)
df_others3 = df_others3.rename(columns={0:'embedding'})
df_others3[['linearFit3_1','linearFit3_2','linearFit3_3','wap_std3_1','wap_std3_2','wap_std3_3']] = pd.DataFrame(df_others3.embedding.tolist(), index=df_others3.index)
df_others3['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_others3['time_id']]
df_others3 = df_others3.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
else:
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
temp = pd.DataFrame([0],columns=['linearFit3_1'])
temp2 = pd.DataFrame([0],columns=['linearFit3_2'])
temp3 = pd.DataFrame([0],columns=['linearFit3_3'])
temp4 = pd.DataFrame([0],columns=['wap_std3_1'])
temp5 = pd.DataFrame([0],columns=['wap_std3_2'])
temp6 = pd.DataFrame([0],columns=['wap_std3_3'])
df_others3 = pd.concat([times_pd,temp,temp2,temp3,temp4,temp5,temp6],axis=1)
list_others3.append(df_others3)
print('Computing one stock took', time.time() - start, 'seconds for stock ', stock_id)
# Create features dataframe
df_submission = pd.concat(list_rv)
df_submission2 = pd.concat(list_rv2)
df_submission3 = pd.concat(list_rv3)
    # Entropy features are not computed in this variant, so the empty list_ent is
    # skipped here (pd.concat on an empty list would raise an error).
df_fin_concat = pd.concat(list_fin)
df_fin2_concat = pd.concat(list_fin2)
df_others = pd.concat(list_others)
df_others2 = pd.concat(list_others2)
df_others3 = pd.concat(list_others3)
df_book_features = df_submission.merge(df_submission2, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_submission3, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_fin_concat, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_fin2_concat, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_others, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_others2, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_others3, on = ['row_id'], how='left').fillna(0)
# Add encoded stock
encoder = np.eye(len(all_stocks_ids))
encoded = list()
for i in range(df_book_features.shape[0]):
stock_id = int(df_book_features['row_id'][i].split('-')[0])
encoded_stock = encoder[np.where(all_stocks_ids == int(stock_id))[0],:]
encoded.append(encoded_stock)
encoded_pd = pd.DataFrame(np.array(encoded).reshape(df_book_features.shape[0],np.array(all_stocks_ids).shape[0]))
df_book_features_encoded = pd.concat([df_book_features, encoded_pd],axis=1)
return df_book_features_encoded
def computeFeatures_newTest_Laurent(machine, dataset, all_stocks_ids, datapath):
list_rv, list_rv2, list_rv3 = [], [], []
list_ent, list_fin, list_fin2 = [], [], []
list_others, list_others2, list_others3 = [], [], []
for stock_id in range(127):
start = time.time()
if machine == 'local':
try:
book_stock = load_book_data_by_id(stock_id,datapath,dataset)
except:
continue
elif machine == 'kaggle':
try:
book_stock = load_book_data_by_id_kaggle(stock_id,dataset)
except:
continue
# Useful
all_time_ids_byStock = book_stock['time_id'].unique()
# Calculate wap for the entire book
book_stock['wap'] = calc_wap(book_stock)
book_stock['wap2'] = calc_wap2(book_stock)
book_stock['wap3'] = calc_wap3(book_stock)
        book_stock['wap4'] = calc_wap4(book_stock)
        book_stock['mid_price'] = mid_price(book_stock)
# Calculate past realized volatility per time_id
df_sub = book_stock.groupby('time_id')['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2 = book_stock.groupby('time_id')['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3 = book_stock.groupby('time_id')['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub4 = book_stock.groupby('time_id')['wap4'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub5 = book_stock.groupby('time_id')['mid_price'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub['time_id']]
df_sub = df_sub.rename(columns={'time_id':'row_id'})
df_sub = pd.concat([df_sub,df_sub2['wap2'],df_sub3['wap3'], df_sub4['wap4'], df_sub5['mid_price']],axis=1)
df_sub = df_sub.rename(columns={'wap': 'rv', 'wap2': 'rv2', 'wap3': 'rv3', 'wap4':'rv4','mid_price':'rv5'})
list_rv.append(df_sub)
# Query segments
bucketQuery480 = book_stock.query(f'seconds_in_bucket >= 480')
isEmpty480 = bucketQuery480.empty
bucketQuery300 = book_stock.query(f'seconds_in_bucket >= 300')
isEmpty300 = bucketQuery300.empty
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
# Calculate past realized volatility per time_id and query subset
if isEmpty300 == False:
df_sub_300 = bucketQuery300.groupby(['time_id'])['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2_300 = bucketQuery300.groupby(['time_id'])['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3_300 = bucketQuery300.groupby(['time_id'])['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub4_300 = bucketQuery300.groupby(['time_id'])['wap4'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub5_300 = bucketQuery300.groupby(['time_id'])['mid_price'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub_300 = pd.concat([times_pd,df_sub_300['wap'],df_sub2_300['wap2'],df_sub3_300['wap3'],df_sub4_300['wap4'],df_sub5_300['mid_price']],axis=1)
            df_sub_300 = df_sub_300.rename(columns={'wap': 'rv_300', 'wap2': 'rv2_300', 'wap3': 'rv3_300', 'wap4': 'rv4_300', 'mid_price': 'rv5_300'})
else: # 0 volatility
            zero_rv = pd.DataFrame(np.zeros((times_pd.shape[0], 1)), columns=['rv_300'])
            zero_rv2 = pd.DataFrame(np.zeros((times_pd.shape[0], 1)), columns=['rv2_300'])
            zero_rv3 = pd.DataFrame(np.zeros((times_pd.shape[0], 1)), columns=['rv3_300'])
            zero_rv4 = pd.DataFrame(np.zeros((times_pd.shape[0], 1)), columns=['rv4_300'])
            zero_rv5 = pd.DataFrame(np.zeros((times_pd.shape[0], 1)), columns=['rv5_300'])
df_sub_300 = pd.concat([times_pd,zero_rv,zero_rv2,zero_rv3,zero_rv4,zero_rv5],axis=1)
list_rv2.append(df_sub_300)
# Calculate realized volatility last 2 min
if isEmpty480 == False:
df_sub_480 = bucketQuery480.groupby(['time_id'])['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2_480 = bucketQuery480.groupby(['time_id'])['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3_480 = bucketQuery480.groupby(['time_id'])['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub4_480 = bucketQuery480.groupby(['time_id'])['wap4'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub5_480 = bucketQuery480.groupby(['time_id'])['mid_price'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub_480 = pd.concat([times_pd,df_sub_480['wap'],df_sub2_480['wap2'],df_sub3_480['wap3'],df_sub4_480['wap4'],df_sub5_480['mid_price']],axis=1)
            df_sub_480 = df_sub_480.rename(columns={'wap': 'rv_480', 'wap2': 'rv2_480', 'wap3': 'rv3_480', 'wap4': 'rv4_480', 'mid_price': 'rv5_480'})
else: # 0 volatility
            zero_rv = pd.DataFrame(np.zeros((times_pd.shape[0], 1)), columns=['rv_480'])
            zero_rv2 = pd.DataFrame(np.zeros((times_pd.shape[0], 1)), columns=['rv2_480'])
            zero_rv3 = pd.DataFrame(np.zeros((times_pd.shape[0], 1)), columns=['rv3_480'])
            zero_rv4 = pd.DataFrame(np.zeros((times_pd.shape[0], 1)), columns=['rv4_480'])
            zero_rv5 = pd.DataFrame(np.zeros((times_pd.shape[0], 1)), columns=['rv5_480'])
df_sub_480 = pd.concat([times_pd,zero_rv,zero_rv2,zero_rv3,zero_rv4,zero_rv5],axis=1)
list_rv3.append(df_sub_480)
# Calculate other financial metrics from book
df_sub_book_feats = book_stock.groupby(['time_id']).apply(financial_metrics).to_frame().reset_index()
df_sub_book_feats = df_sub_book_feats.rename(columns={0:'embedding'})
df_sub_book_feats[['wap_imbalance','price_spread','bid_spread','ask_spread','total_vol','vol_imbalance']] = pd.DataFrame(df_sub_book_feats.embedding.tolist(), index=df_sub_book_feats.index)
df_sub_book_feats['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_book_feats['time_id']]
df_sub_book_feats = df_sub_book_feats.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
list_fin.append(df_sub_book_feats)
if isEmpty300 == False:
df_sub_book_feats_300 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id']).apply(financial_metrics).to_frame().reset_index()
df_sub_book_feats_300 = df_sub_book_feats_300.rename(columns={0:'embedding'})
df_sub_book_feats_300[['wap_imbalance5','price_spread5','bid_spread5','ask_spread5','total_vol5','vol_imbalance5']] = pd.DataFrame(df_sub_book_feats_300.embedding.tolist(), index=df_sub_book_feats_300.index)
df_sub_book_feats_300['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_book_feats_300['time_id']]
df_sub_book_feats_300 = df_sub_book_feats_300.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
else:
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
temp = pd.DataFrame([0],columns=['wap_imbalance5'])
temp2 = pd.DataFrame([0],columns=['price_spread5'])
temp3 = pd.DataFrame([0],columns=['bid_spread5'])
temp4 = pd.DataFrame([0],columns=['ask_spread5'])
temp5 = pd.DataFrame([0],columns=['total_vol5'])
temp6 = pd.DataFrame([0],columns=['vol_imbalance5'])
df_sub_book_feats_300 = pd.concat([times_pd,temp,temp2,temp3,temp4,temp5,temp6],axis=1)
list_fin2.append(df_sub_book_feats_300)
print('Computing one stock took', time.time() - start, 'seconds for stock ', stock_id)
# Create features dataframe
df_submission = pd.concat(list_rv)
df_submission2 = pd.concat(list_rv2)
df_submission3 = pd.concat(list_rv3)
df_fin_concat = pd.concat(list_fin)
df_fin2_concat = pd.concat(list_fin2)
df_book_features = df_submission.merge(df_submission2, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_submission3, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_fin_concat, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_fin2_concat, on = ['row_id'], how='left').fillna(0)
# Add encoded stock
encoder = np.eye(len(all_stocks_ids))
encoded = list()
for i in range(df_book_features.shape[0]):
stock_id = int(df_book_features['row_id'][i].split('-')[0])
encoded_stock = encoder[np.where(all_stocks_ids == int(stock_id))[0],:]
encoded.append(encoded_stock)
encoded_pd = pd.DataFrame(np.array(encoded).reshape(df_book_features.shape[0],np.array(all_stocks_ids).shape[0]))
df_book_features_encoded = pd.concat([df_book_features, encoded_pd],axis=1)
return df_book_features_encoded
def computeFeatures_newTest_Laurent_noCode(machine, dataset, all_stocks_ids, datapath):
list_rv, list_rv2, list_rv3 = [], [], []
list_ent, list_fin, list_fin2 = [], [], []
list_others, list_others2, list_others3 = [], [], []
for stock_id in range(127):
start = time.time()
if machine == 'local':
try:
book_stock = load_book_data_by_id(stock_id,datapath,dataset)
except:
continue
elif machine == 'kaggle':
try:
book_stock = load_book_data_by_id_kaggle(stock_id,dataset)
except:
continue
# Useful
all_time_ids_byStock = book_stock['time_id'].unique()
# Calculate wap for the entire book
book_stock['wap'] = calc_wap(book_stock)
book_stock['wap2'] = calc_wap2(book_stock)
book_stock['wap3'] = calc_wap3(book_stock)
        book_stock['wap4'] = calc_wap4(book_stock)
        book_stock['mid_price'] = mid_price(book_stock)
# Calculate past realized volatility per time_id
df_sub = book_stock.groupby('time_id')['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2 = book_stock.groupby('time_id')['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3 = book_stock.groupby('time_id')['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub4 = book_stock.groupby('time_id')['wap4'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub5 = book_stock.groupby('time_id')['mid_price'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub['time_id']]
df_sub = df_sub.rename(columns={'time_id':'row_id'})
df_sub = pd.concat([df_sub,df_sub2['wap2'],df_sub3['wap3'], df_sub4['wap4'], df_sub5['mid_price']],axis=1)
df_sub = df_sub.rename(columns={'wap': 'rv', 'wap2': 'rv2', 'wap3': 'rv3', 'wap4':'rv4','mid_price':'rv5'})
list_rv.append(df_sub)
# Query segments
bucketQuery480 = book_stock.query(f'seconds_in_bucket >= 480')
isEmpty480 = bucketQuery480.empty
bucketQuery300 = book_stock.query(f'seconds_in_bucket >= 300')
isEmpty300 = bucketQuery300.empty
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
# Calculate past realized volatility per time_id and query subset
if isEmpty300 == False:
df_sub_300 = bucketQuery300.groupby(['time_id'])['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2_300 = bucketQuery300.groupby(['time_id'])['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3_300 = bucketQuery300.groupby(['time_id'])['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub4_300 = bucketQuery300.groupby(['time_id'])['wap4'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub5_300 = bucketQuery300.groupby(['time_id'])['mid_price'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub_300 = pd.concat([times_pd,df_sub_300['wap'],df_sub2_300['wap2'],df_sub3_300['wap3'],df_sub4_300['wap4'],df_sub5_300['mid_price']],axis=1)
            df_sub_300 = df_sub_300.rename(columns={'wap': 'rv_300', 'wap2': 'rv2_300', 'wap3': 'rv3_300', 'wap4': 'rv4_300', 'mid_price': 'rv5_300'})
else: # 0 volatility
            zero_rv = pd.DataFrame(np.zeros((times_pd.shape[0], 1)), columns=['rv_300'])
            zero_rv2 = pd.DataFrame(np.zeros((times_pd.shape[0], 1)), columns=['rv2_300'])
            zero_rv3 = pd.DataFrame(np.zeros((times_pd.shape[0], 1)), columns=['rv3_300'])
            zero_rv4 = pd.DataFrame(np.zeros((times_pd.shape[0], 1)), columns=['rv4_300'])
            zero_rv5 = pd.DataFrame(np.zeros((times_pd.shape[0], 1)), columns=['rv5_300'])
df_sub_300 = pd.concat([times_pd,zero_rv,zero_rv2,zero_rv3,zero_rv4,zero_rv5],axis=1)
list_rv2.append(df_sub_300)
# Calculate realized volatility last 2 min
if isEmpty480 == False:
df_sub_480 = bucketQuery480.groupby(['time_id'])['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2_480 = bucketQuery480.groupby(['time_id'])['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3_480 = bucketQuery480.groupby(['time_id'])['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub4_480 = bucketQuery480.groupby(['time_id'])['wap4'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub5_480 = bucketQuery480.groupby(['time_id'])['mid_price'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub_480 = pd.concat([times_pd,df_sub_480['wap'],df_sub2_480['wap2'],df_sub3_480['wap3'],df_sub4_480['wap4'],df_sub5_480['mid_price']],axis=1)
            df_sub_480 = df_sub_480.rename(columns={'wap': 'rv_480', 'wap2': 'rv2_480', 'wap3': 'rv3_480', 'wap4': 'rv4_480', 'mid_price': 'rv5_480'})
else: # 0 volatility
            zero_rv = pd.DataFrame(np.zeros((times_pd.shape[0], 1)), columns=['rv_480'])
            zero_rv2 = pd.DataFrame(np.zeros((times_pd.shape[0], 1)), columns=['rv2_480'])
            zero_rv3 = pd.DataFrame(np.zeros((times_pd.shape[0], 1)), columns=['rv3_480'])
            zero_rv4 = pd.DataFrame(np.zeros((times_pd.shape[0], 1)), columns=['rv4_480'])
            zero_rv5 = pd.DataFrame(np.zeros((times_pd.shape[0], 1)), columns=['rv5_480'])
df_sub_480 = pd.concat([times_pd,zero_rv,zero_rv2,zero_rv3,zero_rv4,zero_rv5],axis=1)
list_rv3.append(df_sub_480)
# Calculate other financial metrics from book
df_sub_book_feats = book_stock.groupby(['time_id']).apply(financial_metrics).to_frame().reset_index()
df_sub_book_feats = df_sub_book_feats.rename(columns={0:'embedding'})
df_sub_book_feats[['wap_imbalance','price_spread','bid_spread','ask_spread','total_vol','vol_imbalance']] = pd.DataFrame(df_sub_book_feats.embedding.tolist(), index=df_sub_book_feats.index)
df_sub_book_feats['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_book_feats['time_id']]
df_sub_book_feats = df_sub_book_feats.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
list_fin.append(df_sub_book_feats)
if isEmpty300 == False:
df_sub_book_feats_300 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id']).apply(financial_metrics).to_frame().reset_index()
df_sub_book_feats_300 = df_sub_book_feats_300.rename(columns={0:'embedding'})
df_sub_book_feats_300[['wap_imbalance5','price_spread5','bid_spread5','ask_spread5','total_vol5','vol_imbalance5']] = pd.DataFrame(df_sub_book_feats_300.embedding.tolist(), index=df_sub_book_feats_300.index)
df_sub_book_feats_300['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_book_feats_300['time_id']]
df_sub_book_feats_300 = df_sub_book_feats_300.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
else:
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
temp = pd.DataFrame([0],columns=['wap_imbalance5'])
temp2 = pd.DataFrame([0],columns=['price_spread5'])
temp3 = pd.DataFrame([0],columns=['bid_spread5'])
temp4 = pd.DataFrame([0],columns=['ask_spread5'])
temp5 = pd.DataFrame([0],columns=['total_vol5'])
temp6 = pd.DataFrame([0],columns=['vol_imbalance5'])
df_sub_book_feats_300 = pd.concat([times_pd,temp,temp2,temp3,temp4,temp5,temp6],axis=1)
list_fin2.append(df_sub_book_feats_300)
print('Computing one stock took', time.time() - start, 'seconds for stock ', stock_id)
# Create features dataframe
    df_submission = pd.concat(list_rv)
import pandas as pd
import logging
import electricitylci.model_config as config
formatter = logging.Formatter(
"%(levelname)s:%(filename)s:%(funcName)s:%(message)s"
)
logging.basicConfig(
format="%(levelname)s:%(filename)s:%(funcName)s:%(message)s",
level=logging.INFO,
)
logger = logging.getLogger("electricitylci")
def get_generation_process_df(regions=None, **kwargs):
"""
Create a dataframe of emissions from power generation by fuel type in each
region. kwargs would include the upstream emissions dataframe (upstream_df) if
upstream emissions are being included.
Parameters
----------
regions : str, optional
Regions to include in the analysis (the default is None, which uses the value
read from a settings YAML file). Other options include "eGRID", "NERC", "BA",
"US", "FERC", and "EIA"
Returns
-------
DataFrame
Each row represents information about a single emission from a fuel category
in a single region. Columns are:
'Subregion', 'FuelCategory', 'FlowName', 'FlowUUID', 'Compartment',
'Year', 'Source', 'Unit', 'ElementaryFlowPrimeContext',
'TechnologicalCorrelation', 'TemporalCorrelation', 'DataCollection',
'Emission_factor', 'Reliability_Score', 'GeographicalCorrelation',
'GeomMean', 'GeomSD', 'Maximum', 'Minimum'
"""
from electricitylci.generation import create_generation_process_df
from electricitylci.combinator import concat_clean_upstream_and_plant
if config.model_specs.include_renewable_generation is True:
generation_process_df=get_gen_plus_netl()
else:
generation_process_df = create_generation_process_df()
if config.model_specs.include_netl_water is True:
import electricitylci.plant_water_use as water
water_df = water.generate_plant_water_use(config.model_specs.eia_gen_year)
generation_process_df=concat_clean_upstream_and_plant(generation_process_df,water_df)
if config.model_specs.include_upstream_processes is True:
try:
upstream_df = kwargs['upstream_df']
upstream_dict = kwargs['upstream_dict']
except KeyError:
print(
"A kwarg named 'upstream_dict' must be included if include_upstream_processes"
"is True"
)
# upstream_dict = write_upstream_process_database_to_dict(
# upstream_df
# )
# upstream_dict = write_upstream_dicts_to_jsonld(upstream_dict)
combined_df, canadian_gen = combine_upstream_and_gen_df(
generation_process_df, upstream_df
)
gen_plus_fuels = add_fuels_to_gen(
generation_process_df, upstream_df, canadian_gen, upstream_dict
)
else:
import electricitylci.import_impacts as import_impacts
canadian_gen_df = import_impacts.generate_canadian_mixes(generation_process_df)
generation_process_df = pd.concat([generation_process_df, canadian_gen_df], ignore_index=True)
gen_plus_fuels=generation_process_df
#This change has been made to accommodate the new method of generating
#consumption mixes for FERC regions. They now pull BAs to provide
#a more accurate inventory. The tradeoff here is that it's no longer possible
#to make a FERC region generation mix and also provide the consumption mix.
#Or it could be possible but would require running through aggregate twice.
# generation_process_df = aggregate_gen(
# gen_plus_fuels, subregion=regions
# )
if regions is None:
regions = config.model_specs.regional_aggregation
if regions in ["BA","FERC","US"]:
generation_process_df = aggregate_gen(
gen_plus_fuels, subregion="BA"
)
else:
generation_process_df = aggregate_gen(
gen_plus_fuels, subregion=regions
)
return generation_process_df
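# Sketch of the intended call pattern when upstream processes are included
# (illustrative only, based on the other functions in this module):
#
# upstream_df = get_upstream_process_df(config.model_specs.eia_gen_year)
# upstream_dict = write_upstream_process_database_to_dict(upstream_df)
# gen_df = get_generation_process_df(upstream_df=upstream_df,
#                                    upstream_dict=upstream_dict)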
def get_generation_mix_process_df(regions=None):
"""
Create a dataframe of generation mixes by fuel type in each subregion.
This function imports and uses the parameters 'replace_egrid' and
'gen_mix_from_model_generation_data' from model_config.py. If 'replace_egrid'
is true or the specified 'regions' is one of "BA", "FERC", or "US", then the generation mix will
come from EIA 923 data. If 'replace_egrid' is false then the generation
mix will either come from the eGRID reference data
('gen_mix_from_model_generation_data' is false) or from the generation data
from this model ('gen_mix_from_model_generation_data' is true).
Parameters
----------
regions : str, optional
Which regions to include (the default is None, which uses the regional
aggregation from the model config)
Returns
-------
DataFrame
Sample output:
>>> all_gen_mix_db.head()
Subregion FuelCategory Electricity NERC Generation_Ratio
0 AKGD COAL 5.582922e+05 ASCC 0.116814
22 AKGD OIL 3.355753e+05 ASCC 0.070214
48 AKGD GAS 3.157474e+06 ASCC 0.660651
90 AKGD HYDRO 5.477350e+05 ASCC 0.114605
114 AKGD BIOMASS 5.616577e+04 ASCC 0.011752
"""
from electricitylci.egrid_filter import (
electricity_for_selected_egrid_facilities,
)
from electricitylci.generation_mix import (
create_generation_mix_process_df_from_model_generation_data,
create_generation_mix_process_df_from_egrid_ref_data,
)
from electricitylci.eia923_generation import build_generation_data
if regions is None:
regions = config.model_specs.regional_aggregation
if config.model_specs.replace_egrid or regions in ["BA","FERC","US"]:
# assert regions == 'BA' or regions == 'NERC', 'Regions must be BA or NERC'
if regions in ["BA","FERC","US"] and not config.model_specs.replace_egrid:
logger.info(
f"EIA923 generation data is being used for the generation mix "
f"despite replace_egrid = False. The reference eGrid electricity "
f"data cannot be reorgnznied to match BA or FERC regions. For "
f"the US region, the function for generating US mixes does not "
f"support aggregating to the US."
)
print("EIA923 generation data is used when replacing eGRID")
generation_data = build_generation_data(
generation_years=[config.model_specs.eia_gen_year]
)
generation_mix_process_df = create_generation_mix_process_df_from_model_generation_data(
generation_data, regions
)
else:
if config.model_specs.gen_mix_from_model_generation_data:
generation_mix_process_df = create_generation_mix_process_df_from_model_generation_data(
electricity_for_selected_egrid_facilities, regions
)
else:
generation_mix_process_df = create_generation_mix_process_df_from_egrid_ref_data(
regions
)
return generation_mix_process_df
def write_generation_process_database_to_dict(gen_database, regions=None):
"""
Create olca formatted dictionaries of individual processes
Parameters
----------
gen_database : DataFrame
Each row represents information about a single emission from a fuel category
in a single region.
regions : str, optional
Regional aggregation level (the default is None, which uses the regional aggregation from the model config)
Returns
-------
dict
A dictionary of dictionaries, each of which contains information about
emissions from a single fuel type in a single region.
"""
from electricitylci.generation import olcaschema_genprocess
if regions is None:
regions = config.model_specs.regional_aggregation
gen_dict = olcaschema_genprocess(gen_database, subregion=regions)
return gen_dict
def write_generation_mix_database_to_dict(
genmix_database, gen_dict, regions=None
):
from electricitylci.generation_mix import olcaschema_genmix
if regions is None:
regions = config.model_specs.regional_aggregation
if regions in ["FERC","US","BA"]:
genmix_dict = olcaschema_genmix(
genmix_database, gen_dict, subregion="BA"
)
else:
genmix_dict = olcaschema_genmix(
genmix_database, gen_dict, subregion=regions
)
return genmix_dict
def write_surplus_pool_and_consumption_mix_dict():
"""
Create olca formatted dictionaries for the consumption mix as calculated by
consumption_mix.py. Note that this function directly pulls the dataframes,
converts the data into the dictionary and then returns the dictionary.
Returns
-------
dictionary
The surplus pool and consumption mixes for the various regions.
"""
from electricitylci.consumption_mix import surplus_dict
from electricitylci.consumption_mix import consumption_dict
surplus_pool_and_con_mix = {**surplus_dict, **consumption_dict}
return surplus_pool_and_con_mix
def write_distribution_dict():
from electricitylci.distribution import distribution_mix_dictionary
return distribution_mix_dictionary()
def write_process_dicts_to_jsonld(*process_dicts):
"""
Send one or more process dictionaries to be written to json-ld
"""
from electricitylci.olca_jsonld_writer import write
all_process_dicts = dict()
for d in process_dicts:
all_process_dicts = {**all_process_dicts, **d}
olca_dicts = write(all_process_dicts, config.model_specs.namestr)
return olca_dicts
def get_upstream_process_df(eia_gen_year):
"""
Automatically load all of the upstream emissions data from the various
modules. Will return a dataframe with upstream emissions from
coal, natural gas, petroleum, nuclear, and plant construction.
"""
import electricitylci.coal_upstream as coal
import electricitylci.natural_gas_upstream as ng
import electricitylci.petroleum_upstream as petro
import electricitylci.nuclear_upstream as nuke
import electricitylci.power_plant_construction as const
from electricitylci.combinator import concat_map_upstream_databases
print("Generating upstream inventories...")
coal_df = coal.generate_upstream_coal(eia_gen_year)
ng_df = ng.generate_upstream_ng(eia_gen_year)
petro_df = petro.generate_petroleum_upstream(eia_gen_year)
nuke_df = nuke.generate_upstream_nuc(eia_gen_year)
const_df = const.generate_power_plant_construction(eia_gen_year)
#coal and ng already conform to mapping so no mapping needed
upstream_df = concat_map_upstream_databases(eia_gen_year,
petro_df, nuke_df, const_df
)
upstream_df=pd.concat([upstream_df,coal_df,ng_df],sort=False,ignore_index=True)
return upstream_df
def write_upstream_process_database_to_dict(upstream_df):
"""
Convert the upstream dataframe generated by get_upstream_process_df to
dictionaries to be written to json-ld.
Parameters
----------
upstream_df : dataframe
Combined dataframe as generated by gen_upstream_process_df
Returns
-------
dictionary
"""
import electricitylci.upstream_dict as upd
print("Writing upstream processes to dictionaries")
upstream_dicts = upd.olcaschema_genupstream_processes(upstream_df)
return upstream_dicts
def write_upstream_dicts_to_jsonld(upstream_dicts):
"""
Write the upstream dictionary to jsonld.
Parameters
----------
upstream_dicts : dictionary
The dictionary of upstream unit processes generated by
electricitylci.write_upstream_process_database_to_dict.
"""
upstream_dicts = write_process_dicts_to_jsonld(upstream_dicts)
return upstream_dicts
def combine_upstream_and_gen_df(gen_df, upstream_df):
"""
Combine the generation and upstream dataframes into a single dataframe.
The emissions represented here are the annual emissions for all power
plants. This dataframe would be suitable for further analysis.
Parameters
----------
gen_df : dataframe
The generator dataframe, generated by get_gen_plus_netl or
get_generation_process_df. Note that get_generation_process_df returns
two dataframes. The intention would be to send the second returned
dataframe (plant-level emissions) to this routine.
upstream_df : dataframe
The upstream dataframe, generated by get_upstream_process_df
"""
import electricitylci.combinator as combine
import electricitylci.import_impacts as import_impacts
print("Combining upstream and generation inventories")
combined_df = combine.concat_clean_upstream_and_plant(gen_df, upstream_df)
canadian_gen = import_impacts.generate_canadian_mixes(combined_df)
combined_df = | pd.concat([combined_df, canadian_gen], ignore_index=True) | pandas.concat |
'''
Functions for processing BC Hydro observation files.
*Raw files are in PST with daylight savings.
'''
import numpy as np
import pandas as pd
from os.path import basename
from datetime import datetime, timedelta
def flag_out(dataframe, col, flag_col, flag_val):
'''
Preserve values that have qc_code=`flag_val`; flag out the rest.
'PREC_INST_RAW_QC' should have flag_val='200'
'PREC_INST_QC_QC' should have flag_val='50'
'''
temp_data = dataframe[col].values
temp_flag = dataframe[flag_col].values
temp_data[temp_flag!=flag_val] = np.nan
dataframe[col] = temp_data
return dataframe
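# Example (illustrative data only; column names follow the QC convention above):
#
# demo = pd.DataFrame({'PREC_INST_RAW': [0.2, 0.4, 0.1],
#                      'PREC_INST_RAW_QC': ['200', '50', '200']})
# demo = flag_out(demo, 'PREC_INST_RAW', 'PREC_INST_RAW_QC', flag_val='200')
# # the second value becomes NaN because its QC code is not '200'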
def BCH_txt_preprocess(filename_input, filename_output, cols_num, qc_code, verbose=True):
'''
Converting BC Hydro (old) txt observation files into pandas HDF5.
Input
----------
filename_input: a list of raw txt file names
filename_output: the output filename, a string, e.g., 'NRT_2016_2018.hdf'
cols_num: list of names of numerical columns, e.g., 'PREC_INST_QC'
verbose: True for print outs
qc_code: QC codes that mark good-quality values, same length as cols_num
Output
----------
keys: a list of stations that were processed
keys_stored: a list of stations that were stored (have non-NaN values)
'''
cols_need = []
for col in cols_num:
cols_need.append(col)
cols_need.append(col+'_QC') # add qc code into extracted cols
cols_need = ['datetime',] + cols_need
keys = []; keys_stored = []
with pd.HDFStore(filename_output, 'w') as hdf_io:
# loop over txt files
for name in filename_input:
# Assume the data are good at the beginning
Bad = False
# txt to pandas
data = pd.read_csv(name, sep='\t', skiprows=[0])
# use filename (first three letters) as hdf variable keys
key = basename(name)[0:3]
keys.append(key);
if verbose:
print(key)
# collection all detected columns
cols = list(data.columns)
# rename the datetime column
cols[1] = 'datetime'
data.columns = cols
# check missing in the needed columns
for col_need in cols_need:
if np.logical_not(col_need in cols):
if verbose:
print('\t"{}" missing'.format(col_need))
Bad = True;
if Bad:
if verbose:
print('\tInsufficient data')
continue;
# subset to needed columns
data_subset = data[cols_need].copy()
# collecting numerical values from needed columns
L = len(data_subset)
for col_num in cols_num:
temp_data = np.empty(L) # allocation
temp_string = data_subset[col_num]
for i, string in enumerate(temp_string):
# column could be a 'float number' or '+' for NaN
try:
temp_data[i] = float(string)
except:
# "try: failed = got "+" symbol
temp_data[i] = np.nan
# replace raw strings by the converted values
data_subset[col_num] = temp_data
# flag out values based on the qc code
if qc_code is not None:
for i, col in enumerate(cols_num):
data_subset = flag_out(data_subset, col, col+'_QC', flag_val=qc_code[i])
# drop rows that contain NaN values
data_subset = data_subset.dropna(how='any')
# if found "0.0" in datetime col, mark as NaN and drop the row
for i, date_vals in enumerate(data_subset['datetime']):
if date_vals == "0.0":
if verbose:
print("\tFound bad datetime values, drop row {}".format(i))
data_subset = data_subset.drop(data_subset.index[i])
# converting datetime string to pandas datetime after cleaning
data_subset['datetime'] = pd.to_datetime(data_subset['datetime'])
# check the number of remaining rows
L = len(data_subset)
if L < 2:
if verbose:
print('\tInsufficient data after value cleaning, L={}'.format(L))
continue;
# observational times as ending times
# calculating the time diff for resampling
freq = np.empty(L)
for i in range(L-1):
freq[i+1] = (data_subset['datetime'].iloc[i+1] - data_subset['datetime'].iloc[i]).total_seconds()
freq[0] = freq[1]
data_subset['FREQ'] = freq
# dropna changes index, here reset the pandas index
data_out = data_subset.reset_index().drop(['index'], axis=1)
# rename all pre-processed columns
data_out.columns = cols_need + ['FREQ',]
# save into the hdf
hdf_io[key] = data_out
keys_stored.append(key)
return keys, keys_stored
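# Example usage (paths, station files, and QC codes are hypothetical):
#
# from glob import glob
# txt_files = glob('/path/to/bch_raw/*.txt')
# keys, keys_stored = BCH_txt_preprocess(txt_files, 'NRT_2016_2018.hdf',
#                                        cols_num=['PREC_INST_RAW', 'PREC_INST_QC'],
#                                        qc_code=['200', '50'])
# with pd.HDFStore('NRT_2016_2018.hdf', 'r') as hdf_io:
#     station_df = hdf_io[keys_stored[0]]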
def BCH_xls_preprocess(filename_input, filename_output, cols_num, verbose=True):
'''
Converting BC Hydro xls observation files into pandas HDF5.
Input
----------
filename_input: a list of xls file names
filename_output: the output filename, a string, e.g., 'NRT_2019.hdf'
cols_num: name of the single numerical column, e.g., 'PREC_INST_QC'
verbose: True for print outs
Output
----------
keys: a list of stations that were processed
'''
keys = []
with pd.HDFStore(filename_output, 'w') as hdf_io:
for name in filename_input:
pd_temp = | pd.read_excel(name) | pandas.read_excel |
from tutorial.main.stepbystep.stepbysteputils.pgconnector import create_engine_ready
from suricate.data.companies import getsource, gettarget
import pandas as pd
import numpy as np
engine = create_engine_ready()
# filefolder = '~/'
# leftpath = 'source.csv'
# rightpath = 'target.csv'
# df_source = pd.read_csv(filefolder + leftpath, index_col=0, sep='|', encoding='utf-8')
# df_target = pd.read_csv(filefolder + rightpath, index_col=0, sep='|', encoding='utf-8')
df_source_raw = getsource(nrows=500)
df_target_raw = gettarget(nrows=None)
from sklearn.model_selection import train_test_split
def rebuild_ytrue(ix):
y_true_saved = pd.read_sql(sql="SELECT * FROM y_true WHERE y_true.y_true = 1", con=engine).set_index(
['ix_source', 'ix_target'],
drop=True)['y_true']
y = pd.Series(index=ix, data = np.zeros(shape=len(ix)), name='y_true')
ix_common = y_true_saved.index.intersection(ix)
y.loc[ix_common] = y_true_saved.loc[ix_common]
return y
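# rebuild_ytrue returns a 0/1 Series aligned on the candidate-pair index ix:
# pairs recorded as matches in the y_true table get 1, every other pair gets 0.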
def prepare_source(df):
"""
Args:
df:
Returns:
pd.DataFrame
"""
df2 = df
return df2
def prepare_target(df):
"""
Args:
df:
Returns:
pd.DataFrame
"""
df2 = df
return df2
df_source = prepare_source(df_source_raw)
df_target = prepare_target(df_target_raw)
assert df_source.columns.equals(df_target.columns)
print(pd.datetime.now(),' | ', 'number of rows on left:{}'.format(df_source.shape[0]))
print(pd.datetime.now(),' | ', 'number of rows on right:{}'.format(df_target.shape[0]))
import pandas as pd
from tutorial.main.stepbystep.stepbysteputils.esconnector import getesconnector
escon = getesconnector()
from suricate.sbstransformers import SbsApplyComparator
from sklearn.pipeline import FeatureUnion
_sbs_score_list = [
('name_fuzzy', SbsApplyComparator(on='name', comparator='simple')),
('street_fuzzy', SbsApplyComparator(on='street', comparator='simple')),
('name_token', SbsApplyComparator(on='name', comparator='token')),
('street_token', SbsApplyComparator(on='street', comparator='token')),
('city_fuzzy', SbsApplyComparator(on='city', comparator='simple')),
('postalcode_fuzzy', SbsApplyComparator(on='postalcode', comparator='simple')),
('postalcode_contains', SbsApplyComparator(on='postalcode', comparator='contains'))
]
scorer_sbs = FeatureUnion(transformer_list=_sbs_score_list)
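# scorer_sbs produces one similarity column per entry in _sbs_score_list; the
# same names are reused below when the score matrices are wrapped in DataFrames.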
from suricate.pipeline import PartialClf
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import Normalizer
from sklearn.decomposition import PCA
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import precision_score, recall_score, accuracy_score
pipe = Pipeline(steps=[
('Impute', SimpleImputer(strategy='constant', fill_value=0)),
('Scaler', Normalizer()),
('PCA', PCA(n_components=4)),
('Predictor', GradientBoostingClassifier(n_estimators=2000))
])
pred = PartialClf(classifier=pipe)
left_train, left_test = train_test_split(df_source_raw, train_size=0.5)
Xst_train = escon.fit_transform(X=left_train)
ix_con_train = Xst_train.index
Xsbs_train = escon.getsbs(X=left_train, on_ix=ix_con_train)
scores_further_train = scorer_sbs.fit_transform(X=Xsbs_train)
scores_further_train = pd.DataFrame(data=scores_further_train, index=ix_con_train, columns=[c[0] for c in _sbs_score_list])
scores_further_train = pd.concat([Xst_train[['es_score']], scores_further_train], axis=1, ignore_index=False)
y_true_train = rebuild_ytrue(ix=ix_con_train)
pred.fit(X=scores_further_train, y=y_true_train)
y_pred_train = pred.predict(X=scores_further_train)
print(pd.datetime.now(),' | ', 'Scores on training data')
print(pd.datetime.now(),' | ', 'accuracy: {}'.format(accuracy_score(y_true=y_true_train, y_pred=y_pred_train)))
print(pd.datetime.now(),' | ', 'precision: {}'.format(precision_score(y_true=y_true_train, y_pred=y_pred_train)))
print(pd.datetime.now(),' | ', 'recall: {}'.format(recall_score(y_true=y_true_train, y_pred=y_pred_train)))
Xst_test = escon.transform(X=left_test)
ix_con_test = Xst_test.index
Xsbs_test = escon.getsbs(X=left_test, on_ix=ix_con_test)
scores_further_test = scorer_sbs.transform(X=Xsbs_test)
scores_further_test = pd.DataFrame(data=scores_further_test, index=ix_con_test, columns=[c[0] for c in _sbs_score_list])
scores_further_test = pd.concat([Xst_test[['es_score']], scores_further_test], axis=1, ignore_index=False)
y_true_test = rebuild_ytrue(ix=ix_con_test)
y_pred_test = pred.predict(X=scores_further_test)
print(pd.datetime.now(),' | ', 'Scores on testing data')
print(pd.datetime.now(),' | ', 'accuracy: {}'.format(accuracy_score(y_true=y_true_test, y_pred=y_pred_test)))
print(pd.datetime.now(),' | ', 'precision: {}'.format(precision_score(y_true=y_true_test, y_pred=y_pred_test)))
print( | pd.datetime.now() | pandas.datetime.now |