prompt (string, 19 to 1.03M chars) | completion (string, 4 to 2.12k chars) | api (string, 8 to 90 chars) |
---|---|---|
# Copyright 2019 Google Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#Adapted by <NAME> in November 2019 from this Colab notebook:
#https://colab.research.google.com/github/google-research/bert/blob/master/predicting_movie_reviews_with_bert_on_tf_hub.ipynb.
#Changes include:
# - reading our stressor data and parsing it properly
# - reconfiguring the last layer to include N neurons corresponding to N categories
# - correcting the probability output so that it follows a proper [0, 1] pattern
# - better analysis with a confusion matrix
# - exporting to pb format for the TensorFlow Serving API
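# A minimal sketch of the reconfigured classification head (illustrative only; the names
# `pooled_output` and `num_categories` are assumptions, not taken from this script):
# the pooled BERT output is projected onto N units, one per category, and a softmax turns
# the logits into probabilities that lie in [0, 1] and sum to 1.
#   logits = tf.layers.dense(pooled_output, num_categories)
#   probabilities = tf.nn.softmax(logits, axis=-1)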
import os
os.environ['LD_LIBRARY_PATH'] = '/usr/local/cuda-10.0/lib64'
os.environ['CUDA_VISIBLE_DEVICES']='0'
import sys
print(sys.executable)
from sklearn.model_selection import train_test_split
from sklearn.model_selection import StratifiedShuffleSplit
import pandas as pd
import tensorflow as tf
import tensorflow_hub as hub
from datetime import datetime
import matplotlib.pyplot as plt
from sklearn.utils.multiclass import unique_labels
from sklearn.metrics import f1_score,confusion_matrix,classification_report,accuracy_score
import logging
logging.basicConfig(stream=sys.stdout, level=logging.ERROR) # set to logging.INFO for more verbose output
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.max_colwidth', 1000)
config = tf.ConfigProto()
#config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1
#config.gpu_options.visible_device_list="0"
from tensorflow.python.client import device_lib
device_lib.list_local_devices()
import bert
from bert import run_classifier_with_tfhub
from bert import optimization
from bert import tokenization
from bert import modeling
import numpy as np
############ Utility functions ##################
def create_examples_prediction(df):
"""Creates examples for the training and dev sets."""
examples = []
for index, row in df.iterrows():
#labels = row[LABEL_HOT_VECTOR].strip('][').split(', ')
#labels = [float(x) for x in labels]
labels = list(row[label_list_text])
examples.append(labels)
return pd.DataFrame(examples)
def f(x):
"""Return the label with the second-highest probability in row x."""
n = 2 # rank of the probability to pick (2 = second highest)
# argsort is ascending, so [-n:] keeps the n highest indices and [0] selects the second highest
index = np.argsort(x.values.flatten().tolist())[-n:][0]
print(f"index is {index}")
label = label_list_text[index]
print(f"label is {label}")
return label
final_columns = ["sOrder","Input.text","is_stressor","is_stressor_conf","top_label","second_label","Branch", "Above SD-THRESHOLD","SD-THRESHOLD","SD","Other","Everyday Decision Making","Work","Social Relationships","Financial Problem","Health, Fatigue, or Physical Pain","Emotional Turmoil","Family Issues","School","avg_severity","median_severity","SD_severity","Votes","Source"]
def get_test_experiment_df(test):
test_predictions = [x['probabilities'] for x in getListPrediction(in_sentences=list(test[DATA_COLUMN]))] # one probability vector per input sentence
test_live_labels = np.array(test_predictions).argmax(axis=1)
test[LABEL_COLUMN_RAW] = [label_list_text[x] for x in test_live_labels] # appending the labels to the dataframe
probabilities_df_live = pd.DataFrame(test_predictions) # creating a probabilities DataFrame
probabilities_df_live.columns = [x for x in label_list_text] # naming the columns
probabilities_df_live['second_label'] = probabilities_df_live.apply(lambda x:f(x),axis=1)
#print(test)
#label_df = create_examples_prediction(test)
#label_df.columns = label_list_text
#label_df['label 2'] = label_df.apply(lambda x:f(x),axis=1)
test.reset_index(inplace=True,drop=True) # resetting index
test_removed_columns = list(set(test.columns)-set(probabilities_df_live.columns))
test_temp = test[test_removed_columns]
experiment_df = | pd.concat([test_temp,probabilities_df_live],axis=1, ignore_index=False) | pandas.concat |
from datetime import datetime
import re
import unittest
import nose
from nose.tools import assert_equal
import numpy as np
from pandas.tslib import iNaT
from pandas import Series, DataFrame, date_range, DatetimeIndex, Timestamp
from pandas import compat
from pandas.compat import range, long, lrange, lmap, u
from pandas.core.common import notnull, isnull
import pandas.core.common as com
import pandas.util.testing as tm
import pandas.core.config as cf
_multiprocess_can_split_ = True
def test_mut_exclusive():
msg = "mutually exclusive arguments: '[ab]' and '[ab]'"
with tm.assertRaisesRegexp(TypeError, msg):
com._mut_exclusive(a=1, b=2)
assert com._mut_exclusive(a=1, b=None) == 1
assert com._mut_exclusive(major=None, major_axis=None) is None
def test_is_sequence():
is_seq = com._is_sequence
assert(is_seq((1, 2)))
assert(is_seq([1, 2]))
assert(not is_seq("abcd"))
assert(not is_seq(u("abcd")))
assert(not is_seq(np.int64))
class A(object):
def __getitem__(self):
return 1
assert(not is_seq(A()))
def test_notnull():
assert notnull(1.)
assert not notnull(None)
assert not notnull(np.NaN)
with cf.option_context("mode.use_inf_as_null", False):
assert notnull(np.inf)
assert notnull(-np.inf)
arr = np.array([1.5, np.inf, 3.5, -np.inf])
result = notnull(arr)
assert result.all()
with cf.option_context("mode.use_inf_as_null", True):
assert not notnull(np.inf)
assert not notnull(-np.inf)
arr = np.array([1.5, np.inf, 3.5, -np.inf])
result = notnull(arr)
assert result.sum() == 2
with cf.option_context("mode.use_inf_as_null", False):
float_series = Series(np.random.randn(5))
obj_series = Series(np.random.randn(5), dtype=object)
assert(isinstance(notnull(float_series), Series))
assert(isinstance(notnull(obj_series), Series))
def test_isnull():
assert not isnull(1.)
assert isnull(None)
assert isnull(np.NaN)
assert not isnull(np.inf)
assert not isnull(-np.inf)
float_series = Series(np.random.randn(5))
obj_series = Series(np.random.randn(5), dtype=object)
assert(isinstance(isnull(float_series), Series))
assert(isinstance(isnull(obj_series), Series))
# call on DataFrame
df = DataFrame(np.random.randn(10, 5))
df['foo'] = 'bar'
result = isnull(df)
expected = result.apply(isnull)
tm.assert_frame_equal(result, expected)
def test_isnull_tuples():
result = isnull((1, 2))
exp = np.array([False, False])
assert(np.array_equal(result, exp))
result = isnull([(False,)])
exp = np.array([[False]])
assert(np.array_equal(result, exp))
result = isnull([(1,), (2,)])
exp = np.array([[False], [False]])
assert(np.array_equal(result, exp))
# list of strings / unicode
result = isnull(('foo', 'bar'))
assert(not result.any())
result = isnull((u('foo'), u('bar')))
assert(not result.any())
def test_isnull_lists():
result = isnull([[False]])
exp = np.array([[False]])
assert(np.array_equal(result, exp))
result = isnull([[1], [2]])
exp = np.array([[False], [False]])
assert(np.array_equal(result, exp))
# list of strings / unicode
result = isnull(['foo', 'bar'])
assert(not result.any())
result = isnull([u('foo'), u('bar')])
assert(not result.any())
def test_isnull_datetime():
assert (not isnull(datetime.now()))
assert notnull(datetime.now())
idx = date_range('1/1/1990', periods=20)
assert(notnull(idx).all())
idx = np.asarray(idx)
idx[0] = iNaT
idx = DatetimeIndex(idx)
mask = isnull(idx)
assert(mask[0])
assert(not mask[1:].any())
def test_datetimeindex_from_empty_datetime64_array():
for unit in [ 'ms', 'us', 'ns' ]:
idx = DatetimeIndex(np.array([], dtype='datetime64[%s]' % unit))
assert(len(idx) == 0)
def test_nan_to_nat_conversions():
df = DataFrame(dict({
'A' : np.asarray(lrange(10),dtype='float64'),
'B' : Timestamp('20010101') }))
df.iloc[3:6,:] = np.nan
result = df.loc[4,'B'].value
assert(result == iNaT)
s = df['B'].copy()
s._data = s._data.setitem(tuple([slice(8,9)]),np.nan)
assert(isnull(s[8]))
# numpy < 1.7.0 is wrong
from distutils.version import LooseVersion
if LooseVersion(np.__version__) >= '1.7.0':
assert(s[8].value == np.datetime64('NaT').astype(np.int64))
def test_any_none():
assert(com._any_none(1, 2, 3, None))
assert(not com._any_none(1, 2, 3, 4))
def test_all_not_none():
assert(com._all_not_none(1, 2, 3, 4))
assert(not com._all_not_none(1, 2, 3, None))
assert(not com._all_not_none(None, None, None, None))
def test_repr_binary_type():
import string
letters = string.ascii_letters
btype = compat.binary_type
try:
raw = btype(letters, encoding=cf.get_option('display.encoding'))
except TypeError:
raw = btype(letters)
b = compat.text_type(compat.bytes_to_str(raw))
res = com.pprint_thing(b, quote_strings=True)
assert_equal(res, repr(b))
res = com.pprint_thing(b, quote_strings=False)
assert_equal(res, b)
def test_rands():
r = com.rands(10)
assert(len(r) == 10)
def test_adjoin():
data = [['a', 'b', 'c'],
['dd', 'ee', 'ff'],
['ggg', 'hhh', 'iii']]
expected = 'a dd ggg\nb ee hhh\nc ff iii'
adjoined = com.adjoin(2, *data)
assert(adjoined == expected)
def test_iterpairs():
data = [1, 2, 3, 4]
expected = [(1, 2),
(2, 3),
(3, 4)]
result = list(com.iterpairs(data))
assert(result == expected)
def test_split_ranges():
def _bin(x, width):
"return int(x) as a base2 string of given width"
return ''.join(str((x >> i) & 1) for i in range(width - 1, -1, -1))
def test_locs(mask):
nfalse = sum(np.array(mask) == 0)
remaining = 0
for s, e in com.split_ranges(mask):
remaining += e - s
assert 0 not in mask[s:e]
# make sure the total items covered by the ranges are a complete cover
assert remaining + nfalse == len(mask)
# exhaustively test all possible mask sequences of length 8
ncols = 8
for i in range(2 ** ncols):
cols = lmap(int, list(_bin(i, ncols))) # count up in base2
mask = [cols[i] == 1 for i in range(len(cols))]
test_locs(mask)
# base cases
test_locs([])
test_locs([0])
test_locs([1])
def test_indent():
s = 'a b c\nd e f'
result = com.indent(s, spaces=6)
assert(result == ' a b c\n d e f')
def test_banner():
ban = | com.banner('hi') | pandas.core.common.banner |
# We compare armijo line search to fixed learning rate SGD
# when used to fit a CNN / MLP to MNIST
# Linesearch code is from
# https://github.com/IssamLaradji/stochastic_line_search/blob/master/main.py
import superimport
from armijo_sgd import SGD_Armijo, ArmijoModel
# Neural net code is based on various tutorials
#https://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html#sphx-glr-beginner-blitz-cifar10-tutorial-py
#https://github.com/CSCfi/machine-learning-scripts/blob/master/notebooks/pytorch-mnist-mlp.ipynb
import numpy as np
np.set_printoptions(precision=3)
import matplotlib.pyplot as plt
import pyprobml_utils as pml
import warnings
warnings.filterwarnings('ignore')
import torch
use_cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if use_cuda else "cpu")
torch.backends.cudnn.benchmark = True
print('Using PyTorch version:', torch.__version__, ' Device:', device)
figdir = "../figures"
import os
############
# Get data
import torchvision
import torchvision.transforms as transforms
import torchvision.datasets as datasets
batch_size = 32
train_dataset = datasets.MNIST('./data',
train=True,
download=True,
transform=transforms.ToTensor())
test_dataset = datasets.MNIST('./data',
train=False,
transform=transforms.ToTensor())
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
batch_size=batch_size,
shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
batch_size=batch_size,
shuffle=False)
for (X_train, y_train) in train_loader:
print('X_train:', X_train.size(), 'type:', X_train.type())
print('y_train:', y_train.size(), 'type:', y_train.type())
break
bs, ncolors, height, width = X_train.shape
nclasses = 10
N_train = train_dataset.data.shape[0]
#####
# Define model
import torch.nn as nn
import torch.nn.functional as F
criterion = nn.CrossEntropyLoss(reduction='mean')
# https://pytorch.org/docs/stable/nn.html#crossentropyloss
# This criterion combines nn.LogSoftmax() and nn.NLLLoss() in one single class.
# Therefore we don't need the LogSoftmax on the final layer
# But we do need it if we use NLLLoss
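# For reference, a sketch of the equivalence (not part of the original script): with the
# default mean reduction, nn.CrossEntropyLoss()(logits, targets) gives the same value as
# nn.NLLLoss()(F.log_softmax(logits, dim=1), targets).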
# The Armijo method assumes gradient noise goes to zero,
# so it is important that we don't have dropout layers.
class CNN(nn.Module):
def __init__(self):
super(CNN, self).__init__()
self.conv1 = nn.Conv2d(ncolors, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
#self.dropout = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
# input is 28x28x1
# conv1(kernel=5, filters=10) 28x28x1 -> 24x24x10
# max_pool(kernel=2) 24x24x10 -> 12x12x10
x = F.relu(F.max_pool2d(self.conv1(x), 2))
# conv2(kernel=5, filters=20) 12x12x20 -> 8x8x20
# max_pool(kernel=2) 8x8x20 -> 4x4x20
#x = F.relu(F.max_pool2d(self.dropout(self.conv2(x)), 2))
x = F.relu(F.max_pool2d(self.conv2(x), 2))
# flatten 4x4x20 = 320
x = x.view(-1, 320)
# 320 -> 50
x = F.relu(self.fc1(x))
#x = F.dropout(x, training=self.training)
# 50 -> 10
x = self.fc2(x)
return x
#return F.log_softmax(x)
class MLP(nn.Module):
def __init__(self):
super(MLP, self).__init__()
self.fc1 = nn.Linear(ncolors*height*width, 50)
#self.fc1_drop = nn.Dropout(0.2)
self.fc2 = nn.Linear(50, 50)
#self.fc2_drop = nn.Dropout(0.2)
self.fc3 = nn.Linear(50, nclasses)
def forward(self, x):
x = x.view(-1, ncolors*height*width)
x = F.relu(self.fc1(x))
#x = self.fc1_drop(x)
x = F.relu(self.fc2(x))
#x = self.fc2_drop(x)
x = self.fc3(x)
#return F.log_softmax(x, dim=1)
return x
class Logreg(nn.Module):
def __init__(self):
super(Logreg, self).__init__()
self.fc1 = nn.Linear(ncolors*height*width, nclasses)
def forward(self, x):
x = x.view(-1, ncolors*height*width)
x = self.fc1(x)
#return F.log_softmax(x, dim=1)
return x
def make_model(name, seed=0):
np.random.seed(seed)
if name == 'CNN':
net = CNN()
elif name == 'MLP':
net = MLP()
else:
net = Logreg()
net = net.to(device)
return net
###############
# Define each experimental configuration
expts = []
ep = 4
#model = 'Logreg'
model = 'MLP'
#model = 'CNN'
bs = 10
expts.append({'lr':'armijo', 'bs':bs, 'epochs':ep, 'model': model})
expts.append({'lr':0.01, 'bs':bs, 'epochs':ep, 'model': model})
expts.append({'lr':0.1, 'bs':bs, 'epochs':ep, 'model': model})
#expts.append({'lr':0.5, 'bs':bs, 'epochs':ep, 'model': model})
@torch.no_grad()
def eval_loss(model, loader):
avg_loss = 0.0
model.eval()
for step, (x_batch, y_batch) in enumerate(loader):
# Copy data to GPU if needed
x_batch = x_batch.to(device)
y_batch = y_batch.to(device)
y_pred = model(x_batch)
loss = criterion(y_pred, y_batch)
avg_loss += loss.item()
# Compute average loss per example
# Note that the criterion already averages within each batch.
n_batches = len(loader)
avg_loss /= n_batches
return avg_loss
def fit_epoch(model, optimizer, train_loader, loss_history):
epoch_loss = 0.0
model.train()
for step, (x_batch, y_batch) in enumerate(train_loader):
# Copy data to GPU if needed
x_batch = x_batch.to(device)
y_batch = y_batch.to(device)
# Function to (re)evaluate loss and its gradient for this step.
def closure():
optimizer.zero_grad()
y_pred = model(x_batch)
loss = criterion(y_pred, y_batch)
loss.backward()
return loss
loss = optimizer.step(closure)
batch_loss = loss.item()
epoch_loss += batch_loss
loss_history.append(batch_loss)
# Compute average loss per example for this epoch.
# Note that the criterion already averages within each batch.
n_batches = len(train_loader)
epoch_loss /= n_batches
return epoch_loss
def fit_epoch_armijo(model, optimizer, train_loader, loss_history, step_size_history):
epoch_loss = 0.0
for step, (x_batch, y_batch) in enumerate(train_loader):
x_batch = x_batch.to(device)
y_batch = y_batch.to(device)
batch_loss, step_size = model.step((x_batch, y_batch))
epoch_loss += batch_loss
loss_history.append(batch_loss)
step_size_history.append(step_size)
n_batches = len(train_loader)
epoch_loss /= n_batches
return epoch_loss
results_dict = {}
for expt in expts:
lr = expt['lr']
bs = expt['bs']
max_epochs = expt['epochs']
model_name = expt['model']
model = make_model(model_name)
model.train() # set to training mode
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=bs,
shuffle=True, num_workers=2)
n_batches = len(train_loader)
batch_loss_history = []
epoch_loss_history = []
step_size_history = []
print_every = max(1, int(0.1*max_epochs))
if lr == 'armijo':
name = '{}-armijo-bs{}'.format(model_name, bs)
model = ArmijoModel(model, criterion)
optimizer = SGD_Armijo(model, batch_size=bs, dataset_size=N_train)
model.opt = optimizer
armijo = True
else:
name = '{}-lr{:0.3f}-bs{}'.format(model_name, lr, bs)
optimizer = torch.optim.SGD(model.parameters(), lr=lr)
armijo = False
print('starting {}'.format(name))
for epoch in range(max_epochs):
if armijo:
avg_batch_loss = fit_epoch_armijo(model, optimizer, train_loader, batch_loss_history, step_size_history)
else:
avg_batch_loss = fit_epoch(model, optimizer, train_loader, batch_loss_history)
epoch_loss = eval_loss(model, train_loader)
epoch_loss_history.append(epoch_loss)
if epoch % print_every == 0:
print("epoch {}, loss {}".format(epoch, epoch_loss))
label = '{}-final-loss{:0.3f}'.format(name, epoch_loss)
results = {'label': label, 'batch_loss_history': batch_loss_history,
'epoch_loss_history': epoch_loss_history, 'step_size_history': step_size_history}
results_dict[name] = results
plt.figure()
name = 'MLP-armijo-bs10'
results = results_dict[name]
plt.plot(results['step_size_history'])
plt.ylabel('stepsize')
pml.savefig('armijo-mnist-stepsize.pdf')
plt.show()
plt.figure()
for name, results in results_dict.items():
label = results['label']
y = results['epoch_loss_history']
plt.plot(y, label=label)
plt.legend()
pml.savefig('armijo-mnist-epoch-loss.pdf')
plt.show()
# Add smoothed version of batch loss history to results dict
import pandas as pd
for name, results in results_dict.items():
loss_history = results['batch_loss_history']
df = pd.Series(loss_history)
nsteps = len(loss_history)
smoothed = | pd.Series.ewm(df, span=0.1*nsteps) | pandas.Series.ewm |
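# Note on the pandas API: pd.Series.ewm returns an exponentially weighted window object;
# a reduction such as smoothed.mean() is typically applied afterwards to materialize the
# smoothed loss curve.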
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import time
import yfinance as yf
import scipy.optimize as spo
pd.options.display.float_format = '{:.3f}'.format
def get_data_close(symbol):
stock = yf.Ticker(symbol)
df = stock.history(period="max")
return | pd.DataFrame(df['Close']) | pandas.DataFrame |
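# Illustrative usage (the ticker symbol is an example, not from the original script):
#   spy_close = get_data_close('SPY')
#   spy_close.plot()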
"""Base class for modeling portfolio and measuring its performance.
The job of the `Portfolio` class is to create a series of positions allocated
against a cash component, produce an equity curve, incorporate basic transaction costs
and produce a set of statistics about its performance. In particular it outputs
position/profit metrics and drawdown information.
## Workflow
The workflow of `Portfolio` is simple:
1. Receives a set of inputs, such as entry and exit signals
2. Uses them to generate and fill orders in form of records (simulation part)
3. Calculates a broad range of risk & performance metrics based on these records (analysis part)
It basically builds upon the `vectorbt.portfolio.orders.Orders` class. To simplify creation of order
records and keep track of balances, it exposes several convenience methods with prefix `from_`.
For example, you can use `Portfolio.from_signals` method to generate orders from entry and exit signals.
Alternatively, you can use `Portfolio.from_order_func` to run a custom order function on each tick.
The results are then automatically passed to the constructor method of `Portfolio` and you will
receive a portfolio instance ready to be used for performance analysis.
This way, one can simulate and analyze his/her strategy in a couple of lines.
### Example
The following example does something crazy: it checks candlestick data of 6 major cryptocurrencies
in 2020 against every single pattern found in TA-Lib, and translates them into signals:
```python-repl
>>> import numpy as np
>>> import pandas as pd
>>> from datetime import datetime
>>> import talib
>>> import vectorbt as vbt
>>> # Fetch price history
>>> symbols = ['BTC-USD', 'ETH-USD', 'XRP-USD', 'BNB-USD', 'BCH-USD', 'LTC-USD']
>>> start = datetime(2020, 1, 1)
>>> end = datetime(2020, 9, 1)
>>> ohlcv_by_symbol = vbt.utils.data.download(symbols, start=start, end=end)
>>> # Put assets into a single dataframe by price type
>>> ohlcv = vbt.utils.data.concat_symbols(ohlcv_by_symbol)
>>> ohlcv['Open'].head()
symbol BTC-USD ETH-USD XRP-USD BNB-USD BCH-USD \
Date
2019-12-31 7294.438965 132.612274 0.194518 13.952087 209.301987
2020-01-01 7194.892090 129.630661 0.192912 13.730962 204.671295
2020-01-02 7202.551270 130.820038 0.192708 13.698126 204.354538
2020-01-03 6984.428711 127.411263 0.187948 13.035329 196.007690
2020-01-04 7345.375488 134.168518 0.193521 13.667442 222.536560
symbol LTC-USD
Date
2019-12-31 42.766113
2020-01-01 41.326534
2020-01-02 42.018085
2020-01-03 39.863129
2020-01-04 42.383526
>>> # Run every single pattern recognition indicator and combine results
>>> result = pd.DataFrame.vbt.empty_like(ohlcv['Open'], fill_value=0.)
>>> for pattern in talib.get_function_groups()['Pattern Recognition']:
... PRecognizer = vbt.IndicatorFactory.from_talib(pattern)
... pr = PRecognizer.run(ohlcv['Open'], ohlcv['High'], ohlcv['Low'], ohlcv['Close'])
... result = result + pr.integer
>>> # Don't look into future
>>> result = result.vbt.fshift(1)
>>> # Treat each number as order value in USD
>>> size = result / ohlcv['Open']
>>> # Simulate portfolio
>>> portfolio = vbt.Portfolio.from_orders(
... ohlcv['Close'], size, price=ohlcv['Open'],
... init_cash='autoalign', fees=0.001, slippage=0.001)
>>> # Visualize portfolio value
>>> portfolio.value().vbt.plot()
```

## Broadcasting
`Portfolio` is very flexible towards inputs:
* Accepts both Series and DataFrames as inputs
* Broadcasts inputs to the same shape using vectorbt's own broadcasting rules
* Many inputs (such as `fees`) can be passed as a single value, a value per column or row, or as a matrix (see the sketch below)
* Implements flexible indexing wherever possible to save memory
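For example, a minimal sketch (the fee values below are illustrative) that keeps most inputs
scalar but passes `fees` per column:
```python-repl
>>> portfolio = vbt.Portfolio.from_orders(
...     ohlcv['Close'], size, price=ohlcv['Open'],
...     init_cash='autoalign', slippage=0.001,
...     fees=[[0.001, 0.002, 0.001, 0.002, 0.001, 0.002]])  # one value per column
```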
## Grouping
One of the key features of `Portfolio` is the ability to group columns. Groups can be specified by
`group_by`, which can be anything from positions or names of column levels, to a NumPy array with
actual groups. Groups can be formed to share capital between columns or to compute metrics
for a combined portfolio of multiple independent columns.
For example, let's divide our portfolio into two groups sharing the same cash:
```python-repl
>>> # Simulate combined portfolio
>>> group_by = pd.Index([
... 'first', 'first', 'first',
... 'second', 'second', 'second'
... ], name='group')
>>> comb_portfolio = vbt.Portfolio.from_orders(
... ohlcv['Close'], size, price=ohlcv['Open'],
... init_cash='autoalign', fees=0.001, slippage=0.001,
... group_by=group_by, cash_sharing=True)
>>> # Get total profit per group
>>> comb_portfolio.total_profit()
group
first 21891.431061
second 7575.676246
dtype: float64
```
Not only can you analyze each group, but also each column in the group:
```python-repl
>>> # Get total profit per column
>>> comb_portfolio.total_profit(group_by=False)
symbol
BTC-USD 5163.844396
ETH-USD 13368.521326
XRP-USD 3359.065339
BNB-USD 4724.565229
BCH-USD -259.592709
LTC-USD 3110.703726
dtype: float64
```
In the same way, you can introduce new grouping to the method itself:
```python-repl
>>> # Get total profit per group
>>> portfolio.total_profit(group_by=group_by)
group
first 21891.431061
second 7575.676246
dtype: float64
```
!!! note
If cash sharing is enabled, grouping can be disabled but cannot be modified.
## Indexing
In addition, you can use pandas indexing on the `Portfolio` class itself, which forwards
indexing operation to each argument with index:
```python-repl
>>> portfolio['BTC-USD']
<vectorbt.portfolio.base.Portfolio at 0x7fac7517ac88>
>>> portfolio['BTC-USD'].total_profit()
5163.844396244112
```
Combined portfolio is indexed by group:
```python-repl
>>> comb_portfolio['first']
<vectorbt.portfolio.base.Portfolio at 0x7fac5756b828>
>>> comb_portfolio['first'].total_profit()
21891.43106080097
```
!!! note
Changing index (time axis) is not supported. The object should be treated as a Series
rather than a DataFrame; for example, use `portfolio.iloc[0]` instead of `portfolio.iloc[:, 0]`.
Indexing behavior depends solely upon `vectorbt.base.array_wrapper.ArrayWrapper`.
For example, if `group_select` is enabled indexing will be performed on groups,
otherwise on single columns. You can pass wrapper arguments with `wrapper_kwargs`.
## Logging
To collect more information on how a specific order was processed or to be able to track the whole
simulation from the beginning to the end, you can turn on logging.
```python-repl
>>> # Simulate portfolio with logging
>>> portfolio = vbt.Portfolio.from_orders(
... ohlcv['Close'], size, price=ohlcv['Open'],
... init_cash='autoalign', fees=0.001, slippage=0.001, log=True)
>>> portfolio.logs.records
id idx col group cash_now shares_now val_price_now value_now \
0 0 0 0 0 inf 0.000000 7294.438965 inf
... ... ... ... ... ... ... ... ...
1463 1463 243 5 5 inf 271.629075 62.844059 inf
size size_type ... log new_cash new_shares res_size \
0 NaN 0 ... True inf 0.000000 NaN
... ... ... ... ... ... ... ...
1463 7.956202 0 ... True inf 279.585277 7.956202
res_price res_fees res_side res_status res_status_info order_id
0 NaN NaN -1 1 0 -1
... ... ... ... ... ... ...
1463 62.906903 0.5005 0 0 -1 1075
[1464 rows x 30 columns]
```
Just as orders, logs are also records and thus can be easily analyzed:
```python-repl
>>> from vectorbt.portfolio.enums import OrderStatus
>>> portfolio.logs.map_field('res_status', value_map=OrderStatus).value_counts()
symbol BTC-USD ETH-USD XRP-USD BNB-USD BCH-USD LTC-USD
Ignored 59 72 66 66 66 59
Filled 185 172 178 178 178 185
```
Logging can also be turned on just for one order, row, or column, since, like many other
variables, it's specified per order and can broadcast automatically (see the sketch after the note below).
!!! note
Logging can slow down simulation.
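For example, a sketch (the flag values are illustrative) that collects log records only for
the first column:
```python-repl
>>> portfolio = vbt.Portfolio.from_orders(
...     ohlcv['Close'], size, price=ohlcv['Open'],
...     init_cash='autoalign',
...     log=[[True, False, False, False, False, False]])
```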
## Caching
`Portfolio` heavily relies upon caching. If a method or a property requires heavy computation,
it's wrapped with `vectorbt.utils.decorators.cached_method` and `vectorbt.utils.decorators.cached_property`
respectively. Caching can be disabled globally via `vectorbt.settings`.
!!! note
Because of caching, the class is meant to be immutable and all properties are read-only.
To change any attribute, use the `copy` method and pass the attribute as keyword argument.
If you're running out of memory when working with large arrays, make sure to disable caching
and then store the most important time series manually. For example, if you're interested in the Sharpe
ratio or other metrics based on returns, run and save `Portfolio.returns` and then use the
`vectorbt.returns.accessors.ReturnsAccessor` to analyze them. Do not use methods akin to
`Portfolio.sharpe_ratio` because they will re-calculate returns each time.
Alternatively, you can precisely point at attributes and methods that should or shouldn't
be cached. For example, you can blacklist the entire `Portfolio` class except a few most called
methods such as `Portfolio.cash_flow` and `Portfolio.share_flow`:
```python-repl
>>> vbt.settings.caching['blacklist'].append('Portfolio')
>>> vbt.settings.caching['whitelist'].extend([
... 'Portfolio.cash_flow',
... 'Portfolio.share_flow'
... ])
```
Define rules for one instance of `Portfolio`:
```python-repl
>>> vbt.settings.caching['blacklist'].append(portfolio)
>>> vbt.settings.caching['whitelist'].extend([
... portfolio.cash_flow,
... portfolio.share_flow
... ])
```
!!! note
Note that the above approach doesn't work for cached properties.
Use tuples of the instance and the property name instead, such as `(portfolio, 'orders')`.
To reset caching:
```python-repl
>>> vbt.settings.caching.reset()
```
"""
import numpy as np
import pandas as pd
from inspect import signature
from collections import OrderedDict
import warnings
from vectorbt.utils import checks
from vectorbt.utils.decorators import cached_property, cached_method
from vectorbt.utils.enum import convert_str_enum_value
from vectorbt.utils.config import merge_dicts
from vectorbt.utils.random import set_seed
from vectorbt.utils.colors import adjust_opacity
from vectorbt.utils.widgets import make_subplots
from vectorbt.base.reshape_fns import to_1d, to_2d, broadcast, broadcast_to
from vectorbt.base.array_wrapper import ArrayWrapper, Wrapping
from vectorbt.generic import nb as generic_nb
from vectorbt.generic.drawdowns import Drawdowns
from vectorbt.signals.generators import RAND, RPROB
from vectorbt.portfolio import nb
from vectorbt.portfolio.orders import Orders
from vectorbt.portfolio.trades import Trades, Positions
from vectorbt.portfolio.logs import Logs
from vectorbt.portfolio.enums import (
InitCashMode,
CallSeqType,
SizeType,
ConflictMode,
Direction
)
def _mean_agg_func(df):
"""Compute mean for `Portfolio.stats`."""
return df.mean(axis=0)
def add_returns_methods(func_names):
"""Class decorator to add `vectorbt.returns.accessors.ReturnsAccessor` methods to `Portfolio`."""
def wrapper(cls):
for func_name in func_names:
if isinstance(func_name, tuple):
ret_func_name = func_name[0]
else:
ret_func_name = func_name
def returns_method(
self,
*args,
group_by=None,
year_freq=None,
ret_func_name=ret_func_name,
active_returns=False,
in_sim_order=False,
reuse_returns=None,
**kwargs):
if reuse_returns is not None:
returns = reuse_returns
else:
if active_returns:
returns = self.active_returns(group_by=group_by)
else:
returns = self.returns(group_by=group_by, in_sim_order=in_sim_order)
returns_acc = returns.vbt.returns(freq=self.wrapper.freq, year_freq=year_freq)
# Select only those arguments in kwargs that are also in the method's signature
# This is done for Portfolio.stats which passes the same kwargs to multiple methods
method = getattr(returns_acc, ret_func_name)
sig = signature(method)
arg_names = [p.name for p in sig.parameters.values() if p.kind == p.POSITIONAL_OR_KEYWORD]
new_kwargs = {}
for arg_name in arg_names:
if arg_name in kwargs:
new_kwargs[arg_name] = kwargs[arg_name]
return method(*args, **new_kwargs)
if isinstance(func_name, tuple):
func_name = func_name[1]
returns_method.__name__ = func_name
returns_method.__qualname__ = f"Portfolio.{func_name}"
returns_method.__doc__ = f"See `vectorbt.returns.accessors.ReturnsAccessor.{ret_func_name}`."
setattr(cls, func_name, cached_method(returns_method))
return cls
return wrapper
@add_returns_methods([
('daily', 'daily_returns'),
('annual', 'annual_returns'),
('cumulative', 'cumulative_returns'),
('annualized', 'annualized_return'),
'annualized_volatility',
'calmar_ratio',
'omega_ratio',
'sharpe_ratio',
'deflated_sharpe_ratio',
'downside_risk',
'sortino_ratio',
'information_ratio',
'beta',
'alpha',
'tail_ratio',
'value_at_risk',
'conditional_value_at_risk',
'capture',
'up_capture',
'down_capture',
'drawdown',
'max_drawdown'
])
class Portfolio(Wrapping):
"""Class for modeling portfolio and measuring its performance.
Args:
wrapper (ArrayWrapper): Array wrapper.
See `vectorbt.base.array_wrapper.ArrayWrapper`.
close (array_like): Reference price, such as close.
order_records (array_like): A structured NumPy array of order records.
log_records (array_like): A structured NumPy array of log records.
init_cash (InitCashMode, float or array_like of float): Initial capital.
cash_sharing (bool): Whether to share cash within the same group.
call_seq (array_like of int): Sequence of calls per row and group.
incl_unrealized (bool): Whether to include unrealized P&L in statistics.
use_filled_close (bool): Whether to forward-backward fill NaN values in `close`.
Doesn't affect simulation and only used for total profit and market value.
See `Portfolio.fill_close`.
!!! note
Use class methods with `from_` prefix to build a portfolio.
The `__init__` method is reserved for indexing purposes.
!!! note
This class is meant to be immutable. To change any attribute, use `Portfolio.copy`."""
def __init__(self, wrapper, close, order_records, log_records, init_cash,
cash_sharing, call_seq, incl_unrealized=None, use_filled_close=None):
Wrapping.__init__(
self,
wrapper,
close=close,
order_records=order_records,
log_records=log_records,
init_cash=init_cash,
cash_sharing=cash_sharing,
call_seq=call_seq,
incl_unrealized=incl_unrealized,
use_filled_close=use_filled_close
)
# Get defaults
from vectorbt import settings
if incl_unrealized is None:
incl_unrealized = settings.portfolio['incl_unrealized']
if use_filled_close is None:
use_filled_close = settings.portfolio['use_filled_close']
# Store passed arguments
self._close = broadcast_to(close, wrapper.dummy(group_by=False))
self._order_records = order_records
self._log_records = log_records
self._init_cash = init_cash
self._cash_sharing = cash_sharing
self._call_seq = call_seq
self._incl_unrealized = incl_unrealized
self._use_filled_close = use_filled_close
def _indexing_func(self, pd_indexing_func):
"""Perform indexing on `Portfolio`."""
new_wrapper, _, group_idxs, col_idxs = \
self.wrapper._indexing_func_meta(pd_indexing_func, column_only_select=True)
new_close = new_wrapper.wrap(to_2d(self.close, raw=True)[:, col_idxs], group_by=False)
new_order_records = self.orders._col_idxs_records(col_idxs)
new_log_records = self.logs._col_idxs_records(col_idxs)
if isinstance(self._init_cash, int):
new_init_cash = self._init_cash
else:
new_init_cash = to_1d(self._init_cash, raw=True)[group_idxs if self.cash_sharing else col_idxs]
new_call_seq = self.call_seq.values[:, col_idxs]
return self.copy(
wrapper=new_wrapper,
close=new_close,
order_records=new_order_records,
log_records=new_log_records,
init_cash=new_init_cash,
call_seq=new_call_seq
)
# ############# Class methods ############# #
@classmethod
def from_holding(cls, close, **kwargs):
"""Simulate portfolio from holding.
Based on `Portfolio.from_signals`."""
return cls.from_signals(close, True, False, accumulate=False, **kwargs)
@classmethod
def from_random(cls, close, n=None, prob=None, entry_prob=None, exit_prob=None,
param_product=False, seed=None, **kwargs):
"""Simulate portfolio from random entry and exit signals.
Generates signals based either on the number of signals `n` or the probability
of encountering a signal `prob`.
If `n` is set, see `vectorbt.signals.generators.RAND`.
If `prob` is set, see `vectorbt.signals.generators.RPROB`.
Based on `Portfolio.from_signals`."""
from vectorbt import settings
if entry_prob is None:
entry_prob = prob
if exit_prob is None:
exit_prob = prob
if seed is None:
seed = settings.portfolio['seed']
if n is not None and (entry_prob is not None or exit_prob is not None):
raise ValueError("Either n or entry_prob and exit_prob should be set")
if n is not None:
rand = RAND.run(
n=n,
input_shape=close.shape,
input_index=close.vbt.wrapper.index,
input_columns=close.vbt.wrapper.columns,
seed=seed
)
entries = rand.entries
exits = rand.exits
elif entry_prob is not None and exit_prob is not None:
rprob = RPROB.run(
entry_prob=entry_prob,
exit_prob=exit_prob,
param_product=param_product,
input_shape=close.shape,
input_index=close.vbt.wrapper.index,
input_columns=close.vbt.wrapper.columns,
seed=seed
)
entries = rprob.entries
exits = rprob.exits
else:
raise ValueError("At least n or entry_prob and exit_prob should be set")
return cls.from_signals(close, entries, exits, seed=seed, **kwargs)
@classmethod
def from_signals(cls, close, entries, exits, size=None, size_type=None, direction=None, price=None,
fees=None, fixed_fees=None, slippage=None, min_size=None, max_size=None,
reject_prob=None, allow_partial=None, raise_reject=None, accumulate=None, log=None,
conflict_mode=None, close_first=None, val_price=None, init_cash=None, cash_sharing=None,
call_seq=None, max_orders=None, max_logs=None, seed=None, group_by=None,
broadcast_kwargs=None, wrapper_kwargs=None, freq=None, **kwargs):
"""Simulate portfolio from entry and exit signals.
Starting with initial cash `init_cash`, for each signal in `entries`, enters a long/short position
by buying/selling `size` of shares. For each signal in `exits`, closes the position by
selling/buying shares. Depending upon accumulation, each entry signal may increase
the position and each exit signal may decrease the position. When both entry and exit signals
are present, ignores them by default. When grouping is enabled with `group_by`, will compute
the performance of the entire group. When `cash_sharing` is enabled, will share the cash among
all columns in the group.
Args:
close (array_like): Reference price, such as close.
Will broadcast.
Will be used for calculating unrealized P&L and portfolio value.
entries (array_like of bool): Boolean array of entry signals.
Will broadcast.
Becomes a long signal if `direction` is `all` or `longonly`, otherwise short.
exits (array_like of bool): Boolean array of exit signals.
Will broadcast.
Becomes a short signal if `direction` is `all` or `longonly`, otherwise long.
size (float or array_like): Size to order.
Will broadcast.
* Set to any number to buy/sell some fixed amount of shares.
Longs are limited by cash in the account, while shorts are unlimited.
* Set to `np.inf` to buy shares for all cash, or `-np.inf` to sell shares for
initial margin of 100%. If `direction` is not `all`, `-np.inf` will close the position.
* Set to `np.nan` or 0 to skip.
!!! note
Sign will be ignored.
size_type (SizeType or array_like): See `vectorbt.portfolio.enums.SizeType`.
Will broadcast.
Only `SizeType.Shares` and `SizeType.Percent` are supported.
Other modes such as target percentage are not compatible with signals since
their logic may contradict the direction of the signal.
!!! note
`SizeType.Percent` does not support position reversal. Switch to a single
direction or use `close_first`.
See warning on `size_type` in `Portfolio.from_orders`.
direction (Direction or array_like): See `vectorbt.portfolio.enums.Direction`.
Will broadcast.
price (array_like of float): Order price.
Defaults to `close`. Will broadcast.
fees (float or array_like): Fees in percentage of the order value.
Will broadcast. Note that 0.01 = 1%.
fixed_fees (float or array_like): Fixed amount of fees to pay per order.
Will broadcast.
slippage (float or array_like): Slippage in percentage of price.
Will broadcast. Note that 0.01 = 1%.
min_size (float or array_like): Minimum size for an order to be accepted.
Will broadcast.
max_size (float or array_like): Maximum size for an order.
Will broadcast.
Will be partially filled if exceeded. You might not be able to properly close
the position if accumulation is enabled and `max_size` is too low.
reject_prob (float or array_like): Order rejection probability.
Will broadcast.
allow_partial (bool or array_like): Whether to allow partial fills.
Will broadcast.
Does not apply when size is `np.inf`.
raise_reject (bool or array_like): Whether to raise an exception if order gets rejected.
Will broadcast.
log (bool or array_like): Whether to log orders.
Will broadcast.
accumulate (bool or array_like): Whether to accumulate signals.
Will broadcast.
Behaves similarly to `Portfolio.from_orders`.
conflict_mode (ConflictMode or array_like): See `vectorbt.portfolio.enums.ConflictMode`.
Will broadcast.
close_first (bool or array_like): Whether to close the position first before reversal.
Will broadcast.
Otherwise reverses the position with a single order and within the same tick.
Takes only effect under `Direction.All`. Requires a second signal to enter
the opposite position. This allows defining parameters such as `fixed_fees` for long
and short positions separately.
val_price (array_like of float): Asset valuation price.
Defaults to `price` if set, otherwise to previous `close`.
See `val_price` in `Portfolio.from_orders`.
init_cash (InitCashMode, float or array_like of float): Initial capital.
See `init_cash` in `Portfolio.from_order_func`.
cash_sharing (bool): Whether to share cash within the same group.
See `cash_sharing` in `Portfolio.from_orders`.
call_seq (CallSeqType or array_like of int): Default sequence of calls per row and group.
See `call_seq` in `Portfolio.from_orders`.
max_orders (int): Size of the order records array.
Defaults to the number of elements in the broadcasted shape.
Set to a lower number if you run out of memory.
max_logs (int): Size of the log records array.
Defaults to the number of elements in the broadcasted shape if any of the `log` is True,
otherwise to 1.
Set to a lower number if you run out of memory.
seed (int): Seed to be set for both `call_seq` and at the beginning of the simulation.
group_by (any): Group columns. See `vectorbt.base.column_grouper.ColumnGrouper`.
broadcast_kwargs (dict): Keyword arguments passed to `vectorbt.base.reshape_fns.broadcast`.
wrapper_kwargs (dict): Keyword arguments passed to `vectorbt.base.array_wrapper.ArrayWrapper`.
freq (any): Index frequency in case `close.index` is not datetime-like.
**kwargs: Keyword arguments passed to the `__init__` method.
All broadcastable arguments will be broadcast using `vectorbt.base.reshape_fns.broadcast`
but keep original shape to utilize flexible indexing and to save memory.
For defaults, see `vectorbt.settings.portfolio`.
!!! hint
If you generated signals using close price, don't forget to shift your signals by one tick
forward, for example, with `signals.vbt.fshift(1)`. In general, make sure to use a price
that comes after the signal.
Also see notes and hints for `Portfolio.from_orders`.
## Example
Some of the ways of how signals are interpreted:
```python-repl
>>> import pandas as pd
>>> import vectorbt as vbt
>>> close = pd.Series([1, 2, 3, 4, 5])
>>> entries = pd.Series([True, True, True, False, False])
>>> exits = pd.Series([False, False, True, True, True])
>>> # Entry opens long, exit closes long
>>> portfolio = vbt.Portfolio.from_signals(
... close, entries, exits, size=1., direction='longonly')
>>> portfolio.share_flow()
0 1.0
1 0.0
2 0.0
3 -1.0
4 0.0
dtype: float64
>>> # Entry opens short, exit closes short
>>> portfolio = vbt.Portfolio.from_signals(
... close, entries, exits, size=1., direction='shortonly')
>>> portfolio.share_flow()
0 -1.0
1 0.0
2 0.0
3 1.0
4 0.0
dtype: float64
>>> # Entry opens long and closes short, exit closes long and opens short
>>> # Reversal within one tick
>>> portfolio = vbt.Portfolio.from_signals(
... close, entries, exits, size=1., direction='all')
>>> portfolio.share_flow()
0 1.0
1 0.0
2 0.0
3 -2.0
4 0.0
dtype: float64
>>> # Reversal within two ticks
>>> # First signal closes position, second signal opens the opposite one
>>> portfolio = vbt.Portfolio.from_signals(
... close, entries, exits, size=1., direction='all',
... close_first=True)
>>> portfolio.share_flow()
0 1.0
1 0.0
2 0.0
3 -1.0
4 -1.0
dtype: float64
>>> # If entry and exit, chooses exit
>>> portfolio = vbt.Portfolio.from_signals(
... close, entries, exits, size=1., direction='all',
... close_first=True, conflict_mode='exit')
>>> portfolio.share_flow()
0 1.0
1 0.0
2 -1.0
3 -1.0
4 0.0
dtype: float64
>>> # Entry means long order, exit means short order
>>> # Acts similar to `from_orders`
>>> portfolio = vbt.Portfolio.from_signals(
... close, entries, exits, size=1., direction='all',
... accumulate=True)
>>> portfolio.share_flow()
0 1.0
1 1.0
2 0.0
3 -1.0
4 -1.0
dtype: float64
>>> # Testing multiple parameters (via broadcasting)
>>> from vectorbt.portfolio.enums import Direction
>>> portfolio = vbt.Portfolio.from_signals(
... close, entries, exits, direction=[list(Direction)],
... broadcast_kwargs=dict(columns_from=Direction._fields))
>>> portfolio.share_flow()
Long Short All
0 100.0 -100.0 100.0
1 0.0 0.0 0.0
2 0.0 0.0 0.0
3 -100.0 50.0 -200.0
4 0.0 0.0 0.0
```
"""
# Get defaults
from vectorbt import settings
if size is None:
size = settings.portfolio['size']
if size_type is None:
size_type = settings.portfolio['signal_size_type']
size_type = convert_str_enum_value(SizeType, size_type)
if direction is None:
direction = settings.portfolio['signal_direction']
direction = convert_str_enum_value(Direction, direction)
if price is None:
price = close
if fees is None:
fees = settings.portfolio['fees']
if fixed_fees is None:
fixed_fees = settings.portfolio['fixed_fees']
if slippage is None:
slippage = settings.portfolio['slippage']
if min_size is None:
min_size = settings.portfolio['min_size']
if max_size is None:
max_size = settings.portfolio['max_size']
if reject_prob is None:
reject_prob = settings.portfolio['reject_prob']
if allow_partial is None:
allow_partial = settings.portfolio['allow_partial']
if raise_reject is None:
raise_reject = settings.portfolio['raise_reject']
if log is None:
log = settings.portfolio['log']
if accumulate is None:
accumulate = settings.portfolio['accumulate']
if conflict_mode is None:
conflict_mode = settings.portfolio['conflict_mode']
conflict_mode = convert_str_enum_value(ConflictMode, conflict_mode)
if close_first is None:
close_first = settings.portfolio['close_first']
if val_price is None:
if price is None:
if checks.is_pandas(close):
val_price = close.vbt.fshift(1)
else:
val_price = np.require(close, dtype=np.float_)
val_price = np.roll(val_price, 1, axis=0)
val_price[0] = np.nan
else:
val_price = price
if init_cash is None:
init_cash = settings.portfolio['init_cash']
init_cash = convert_str_enum_value(InitCashMode, init_cash)
if isinstance(init_cash, int) and init_cash in InitCashMode:
init_cash_mode = init_cash
init_cash = np.inf
else:
init_cash_mode = None
if cash_sharing is None:
cash_sharing = settings.portfolio['cash_sharing']
if call_seq is None:
call_seq = settings.portfolio['call_seq']
call_seq = convert_str_enum_value(CallSeqType, call_seq)
auto_call_seq = False
if isinstance(call_seq, int):
if call_seq == CallSeqType.Auto:
call_seq = CallSeqType.Default
auto_call_seq = True
if seed is None:
seed = settings.portfolio['seed']
if seed is not None:
set_seed(seed)
if freq is None:
freq = settings.portfolio['freq']
if broadcast_kwargs is None:
broadcast_kwargs = {}
if wrapper_kwargs is None:
wrapper_kwargs = {}
if not wrapper_kwargs.get('group_select', True) and cash_sharing:
raise ValueError("group_select cannot be disabled if cash_sharing=True")
# Broadcast inputs
# Only close is broadcast, others can remain unchanged thanks to flexible indexing
broadcastable_args = (
close,
entries,
exits,
size,
size_type,
direction,
price,
fees,
fixed_fees,
slippage,
min_size,
max_size,
reject_prob,
allow_partial,
raise_reject,
accumulate,
log,
conflict_mode,
close_first,
val_price
)
keep_raw = [False] + [True] * (len(broadcastable_args) - 1)
broadcast_kwargs = merge_dicts(dict(require_kwargs=dict(requirements='W')), broadcast_kwargs)
broadcasted_args = broadcast(*broadcastable_args, **broadcast_kwargs, keep_raw=keep_raw)
close = broadcasted_args[0]
if not checks.is_pandas(close):
close = pd.Series(close) if close.ndim == 1 else pd.DataFrame(close)
target_shape_2d = (close.shape[0], close.shape[1] if close.ndim > 1 else 1)
wrapper = ArrayWrapper.from_obj(close, freq=freq, group_by=group_by, **wrapper_kwargs)
cs_group_lens = wrapper.grouper.get_group_lens(group_by=None if cash_sharing else False)
init_cash = np.require(np.broadcast_to(init_cash, (len(cs_group_lens),)), dtype=np.float_)
group_lens = wrapper.grouper.get_group_lens(group_by=group_by)
if checks.is_array(call_seq):
call_seq = nb.require_call_seq(broadcast(call_seq, to_shape=target_shape_2d, to_pd=False))
else:
call_seq = nb.build_call_seq(target_shape_2d, group_lens, call_seq_type=call_seq)
if max_orders is None:
max_orders = target_shape_2d[0] * target_shape_2d[1]
if max_logs is None:
max_logs = target_shape_2d[0] * target_shape_2d[1]
if not np.any(log):
max_logs = 1
# Perform calculation
order_records, log_records = nb.simulate_from_signals_nb(
target_shape_2d,
cs_group_lens, # group only if cash sharing is enabled to speed up
init_cash,
call_seq,
auto_call_seq,
*broadcasted_args[1:],
max_orders,
max_logs,
close.ndim == 2
)
# Create an instance
return cls(
wrapper,
close,
order_records,
log_records,
init_cash if init_cash_mode is None else init_cash_mode,
cash_sharing,
call_seq,
**kwargs
)
@classmethod
def from_orders(cls, close, size, size_type=None, direction=None, price=None, fees=None,
fixed_fees=None, slippage=None, min_size=None, max_size=None, reject_prob=None,
allow_partial=None, raise_reject=None, log=None, val_price=None, init_cash=None,
cash_sharing=None, call_seq=None, max_orders=None, max_logs=None, seed=None,
group_by=None, broadcast_kwargs=None, wrapper_kwargs=None, freq=None, **kwargs):
"""Simulate portfolio from orders.
Starting with initial cash `init_cash`, orders the number of shares specified in `size`
for `price`.
Args:
close (array_like): Reference price, such as close.
Will broadcast.
Will be used for calculating unrealized P&L and portfolio value.
size (float or array_like): Size to order.
Will broadcast.
Behavior depends upon `size_type` and `direction`. For `SizeType.Shares`:
* Set to any number to buy/sell some fixed amount of shares.
Longs are limited by cash in the account, while shorts are unlimited.
* Set to `np.inf` to buy shares for all cash, or `-np.inf` to sell shares for
initial margin of 100%. If `direction` is not `all`, `-np.inf` will close the position.
* Set to `np.nan` or 0 to skip.
For any target size:
* Set to any number to buy/sell amount of shares relative to current holdings or value.
* Set to 0 to close the current position.
* Set to `np.nan` to skip.
size_type (SizeType or array_like): See `vectorbt.portfolio.enums.SizeType`.
Will broadcast.
!!! note
`SizeType.Percent` does not support position reversal. Switch to a single direction.
!!! warning
Be cautious using `SizeType.Percent` with `call_seq` set to 'auto'.
To execute sell orders before buy orders, the value of each order in the group
needs to be approximated in advance. But since `SizeType.Percent` depends
upon cash balance, which cannot be calculated in advance, the latest cash balance
is used. This can yield wrong call sequence for buy orders.
direction (Direction or array_like): See `vectorbt.portfolio.enums.Direction`.
Will broadcast.
price (array_like of float): Order price.
Defaults to `close`. Will broadcast.
fees (float or array_like): Fees in percentage of the order value.
Will broadcast. Note that 0.01 = 1%.
fixed_fees (float or array_like): Fixed amount of fees to pay per order.
Will broadcast.
slippage (float or array_like): Slippage in percentage of price.
Will broadcast. Note that 0.01 = 1%.
min_size (float or array_like): Minimum size for an order to be accepted.
Will broadcast.
max_size (float or array_like): Maximum size for an order.
Will broadcast.
Will be partially filled if exceeded.
reject_prob (float or array_like): Order rejection probability.
Will broadcast.
allow_partial (bool or array_like): Whether to allow partial fills.
Will broadcast.
Does not apply when size is `np.inf`.
raise_reject (bool or array_like): Whether to raise an exception if order gets rejected.
Will broadcast.
log (bool or array_like): Whether to log orders.
Will broadcast.
val_price (array_like of float): Asset valuation price.
Defaults to `price`. Will broadcast.
Used at the time of decision making to calculate value of each asset in the group,
for example, to convert target value into target shares.
!!! note
Make sure to use timestamp for `val_price` that comes before timestamps of
all orders in the group with cash sharing (previous `close` for example),
otherwise you're cheating yourself.
init_cash (InitCashMode, float or array_like of float): Initial capital.
See `init_cash` in `Portfolio.from_order_func`.
cash_sharing (bool): Whether to share cash within the same group.
!!! warning
Introduces cross-asset dependencies.
This method presumes that in a group of assets that share the same capital all
orders will be executed within the same tick and retain their price regardless
of their position in the queue, even though they depend upon each other and thus
cannot be executed in parallel.
call_seq (CallSeqType or array_like of int): Default sequence of calls per row and group.
Each value in this sequence should indicate the position of column in the group to
call next. Processing of `call_seq` always goes from left to right.
For example, `[2, 0, 1]` would first call column 'c', then 'a', and finally 'b'.
* Use `vectorbt.portfolio.enums.CallSeqType` to select a sequence type.
* Set to array to specify custom sequence. Will not broadcast.
If `CallSeqType.Auto` selected, rearranges calls dynamically based on order value.
Calculates value of all orders per row and group, and sorts them by this value.
Sell orders will be executed first to release funds for buy orders.
!!! warning
`CallSeqType.Auto` should be used with caution:
* It not only presumes that order prices are known beforehand, but also that
orders can be executed in arbitrary order and still retain their price.
In reality, this is hardly the case: after processing one asset, some time
has passed and the price for other assets might have already changed.
* Even if you're able to specify a slippage large enough to compensate for
this behavior, slippage itself should depend upon execution order.
This method doesn't let you do that.
* If one order is rejected, it may still execute the next orders and possibly
leave them without the required funds.
For more control, use `Portfolio.from_order_func`.
max_orders (int): Size of the order records array.
Defaults to the number of elements in the broadcasted shape.
Set to a lower number if you run out of memory.
max_logs (int): Size of the log records array.
Defaults to the number of elements in the broadcasted shape if any of the `log` is True,
otherwise to 1.
Set to a lower number if you run out of memory.
seed (int): Seed to be set for both `call_seq` and at the beginning of the simulation.
group_by (any): Group columns. See `vectorbt.base.column_grouper.ColumnGrouper`.
broadcast_kwargs (dict): Keyword arguments passed to `vectorbt.base.reshape_fns.broadcast`.
wrapper_kwargs (dict): Keyword arguments passed to `vectorbt.base.array_wrapper.ArrayWrapper`.
freq (any): Index frequency in case `close.index` is not datetime-like.
**kwargs: Keyword arguments passed to the `__init__` method.
All broadcastable arguments will be broadcast using `vectorbt.base.reshape_fns.broadcast`
but keep original shape to utilize flexible indexing and to save memory.
For defaults, see `vectorbt.settings.portfolio`.
!!! note
When `call_seq` is not `CallSeqType.Auto`, at each timestamp, processing of the assets in
a group goes strictly in order defined in `call_seq`. This order can't be changed dynamically.
This has one big implication for this particular method: the last asset in the call stack
cannot be processed until other assets are processed. This is the reason why rebalancing
cannot work properly in this setting: one has to specify percentages for all assets beforehand
and then tweak the processing order to sell to-be-sold assets first in order to release funds
for to-be-bought assets. This can be automatically done by using `CallSeqType.Auto`.
!!! hint
All broadcastable arguments can be set per frame, series, row, column, or element.
## Example
Buy 10 shares each tick:
```python-repl
>>> import pandas as pd
>>> import vectorbt as vbt
>>> close = pd.Series([1, 2, 3, 4, 5])
>>> portfolio = vbt.Portfolio.from_orders(close, 10)
>>> portfolio.shares()
0 10.0
1 20.0
2 30.0
3 40.0
4 40.0
dtype: float64
>>> portfolio.cash()
0 90.0
1 70.0
2 40.0
3 0.0
4 0.0
dtype: float64
```
Reverse each position by first closing it:
```python-repl
>>> size = [1, 0, -1, 0, 1]
>>> portfolio = vbt.Portfolio.from_orders(close, size, size_type='targetpercent')
>>> portfolio.shares()
0 100.000000
1 0.000000
2 -66.666667
3 0.000000
4 26.666667
dtype: float64
>>> portfolio.cash()
0 0.000000
1 200.000000
2 400.000000
3 133.333333
4 0.000000
dtype: float64
```
Equal-weighted portfolio as in the `vectorbt.portfolio.nb.simulate_nb` example.
It's more compact but gives less control over execution:
```python-repl
>>> import numpy as np
>>> np.random.seed(42)
>>> close = pd.DataFrame(np.random.uniform(1, 10, size=(5, 3)))
>>> size = pd.Series(np.full(5, 1/3)) # each column 33.3%
>>> size[1::2] = np.nan # skip every second tick
>>> portfolio = vbt.Portfolio.from_orders(
... close, # acts both as reference and order price here
... size,
... size_type='targetpercent',
... call_seq='auto', # first sell then buy
... group_by=True, # one group
... cash_sharing=True, # assets share the same cash
... fees=0.001, fixed_fees=1., slippage=0.001 # costs
... )
>>> portfolio.holding_value(group_by=False).vbt.plot()
```

"""
# Get defaults
from vectorbt import settings
if size is None:
size = settings.portfolio['size']
if size_type is None:
size_type = settings.portfolio['size_type']
size_type = convert_str_enum_value(SizeType, size_type)
if direction is None:
direction = settings.portfolio['order_direction']
direction = convert_str_enum_value(Direction, direction)
if price is None:
price = close
if fees is None:
fees = settings.portfolio['fees']
if fixed_fees is None:
fixed_fees = settings.portfolio['fixed_fees']
if slippage is None:
slippage = settings.portfolio['slippage']
if min_size is None:
min_size = settings.portfolio['min_size']
if max_size is None:
max_size = settings.portfolio['max_size']
if reject_prob is None:
reject_prob = settings.portfolio['reject_prob']
if allow_partial is None:
allow_partial = settings.portfolio['allow_partial']
if raise_reject is None:
raise_reject = settings.portfolio['raise_reject']
if log is None:
log = settings.portfolio['log']
if val_price is None:
val_price = price
if init_cash is None:
init_cash = settings.portfolio['init_cash']
init_cash = convert_str_enum_value(InitCashMode, init_cash)
if isinstance(init_cash, int) and init_cash in InitCashMode:
init_cash_mode = init_cash
init_cash = np.inf
else:
init_cash_mode = None
if cash_sharing is None:
cash_sharing = settings.portfolio['cash_sharing']
if call_seq is None:
call_seq = settings.portfolio['call_seq']
call_seq = convert_str_enum_value(CallSeqType, call_seq)
auto_call_seq = False
if isinstance(call_seq, int):
if call_seq == CallSeqType.Auto:
call_seq = CallSeqType.Default
auto_call_seq = True
if seed is None:
seed = settings.portfolio['seed']
if seed is not None:
set_seed(seed)
if freq is None:
freq = settings.portfolio['freq']
if broadcast_kwargs is None:
broadcast_kwargs = {}
if wrapper_kwargs is None:
wrapper_kwargs = {}
if not wrapper_kwargs.get('group_select', True) and cash_sharing:
raise ValueError("group_select cannot be disabled if cash_sharing=True")
# Broadcast inputs
# Only close is broadcast, others can remain unchanged thanks to flexible indexing
broadcastable_args = (
close,
size,
size_type,
direction,
price,
fees,
fixed_fees,
slippage,
min_size,
max_size,
reject_prob,
allow_partial,
raise_reject,
log,
val_price
)
keep_raw = [False] + [True] * (len(broadcastable_args) - 1)
broadcast_kwargs = merge_dicts(dict(require_kwargs=dict(requirements='W')), broadcast_kwargs)
broadcasted_args = broadcast(*broadcastable_args, **broadcast_kwargs, keep_raw=keep_raw)
close = broadcasted_args[0]
if not checks.is_pandas(close):
close = pd.Series(close) if close.ndim == 1 else pd.DataFrame(close)
target_shape_2d = (close.shape[0], close.shape[1] if close.ndim > 1 else 1)
wrapper = ArrayWrapper.from_obj(close, freq=freq, group_by=group_by, **wrapper_kwargs)
cs_group_lens = wrapper.grouper.get_group_lens(group_by=None if cash_sharing else False)
init_cash = np.require(np.broadcast_to(init_cash, (len(cs_group_lens),)), dtype=np.float_)
group_lens = wrapper.grouper.get_group_lens(group_by=group_by)
if checks.is_array(call_seq):
call_seq = nb.require_call_seq(broadcast(call_seq, to_shape=target_shape_2d, to_pd=False))
else:
call_seq = nb.build_call_seq(target_shape_2d, group_lens, call_seq_type=call_seq)
if max_orders is None:
max_orders = target_shape_2d[0] * target_shape_2d[1]
if max_logs is None:
max_logs = target_shape_2d[0] * target_shape_2d[1]
if not np.any(log):
max_logs = 1
# Perform calculation
order_records, log_records = nb.simulate_from_orders_nb(
target_shape_2d,
cs_group_lens, # group only if cash sharing is enabled to speed up
init_cash,
call_seq,
auto_call_seq,
*broadcasted_args[1:],
max_orders,
max_logs,
close.ndim == 2
)
# Create an instance
return cls(
wrapper,
close,
order_records,
log_records,
init_cash if init_cash_mode is None else init_cash_mode,
cash_sharing,
call_seq,
**kwargs
)
@classmethod
def from_order_func(cls, close, order_func_nb, *order_args, target_shape=None, keys=None,
init_cash=None, cash_sharing=None, call_seq=None, active_mask=None,
prep_func_nb=None, prep_args=None, group_prep_func_nb=None, group_prep_args=None,
row_prep_func_nb=None, row_prep_args=None, segment_prep_func_nb=None,
segment_prep_args=None, row_wise=None, max_orders=None, max_logs=None,
seed=None, group_by=None, broadcast_kwargs=None, wrapper_kwargs=None, freq=None, **kwargs):
"""Build portfolio from a custom order function.
For details, see `vectorbt.portfolio.nb.simulate_nb`.
If `row_wise` is True, also see `vectorbt.portfolio.nb.simulate_row_wise_nb`.
Args:
close (array_like): Reference price, such as close.
Will broadcast to `target_shape`.
Will be used for calculating unrealized P&L and portfolio value.
order_func_nb (callable): Order generation function.
*order_args: Arguments passed to `order_func_nb`.
target_shape (tuple): Target shape to iterate over. Defaults to `close.shape`.
keys (sequence): Outermost column level.
Each element should correspond to one iteration over columns in `close`.
Should be set only if `target_shape` is bigger than `close.shape`.
init_cash (InitCashMode, float or array_like of float): Initial capital.
By default, will broadcast to the number of columns.
If cash sharing is enabled, will broadcast to the number of groups.
See `vectorbt.portfolio.enums.InitCashMode` to find optimal initial cash.
!!! note
Mode `InitCashMode.AutoAlign` is applied after the portfolio is initialized
to set the same initial cash for all columns/groups. Changing grouping
will change the initial cash, so be aware when indexing.
cash_sharing (bool): Whether to share cash within the same group.
!!! warning
Introduces cross-asset dependencies.
call_seq (CallSeqType or array_like of int): Default sequence of calls per row and group.
* Use `vectorbt.portfolio.enums.CallSeqType` to select a sequence type.
* Set to array to specify custom sequence. Will not broadcast.
!!! note
CallSeqType.Auto should be implemented manually.
Use `auto_call_seq_ctx_nb` in `segment_prep_func_nb`.
active_mask (int or array_like of bool): Mask of whether a particular segment should be executed.
Supplying an integer will activate every n-th row (just for convenience).
Supplying a boolean will broadcast to the number of rows and groups.
prep_func_nb (callable): Simulation preparation function.
prep_args (tuple): Packed arguments passed to `prep_func_nb`.
Defaults to `()`.
group_prep_func_nb (callable): Group preparation function.
Called only if `row_wise` is False.
group_prep_args (tuple): Packed arguments passed to `group_prep_func_nb`.
Defaults to `()`.
row_prep_func_nb (callable): Row preparation function.
Called only if `row_wise` is True.
row_prep_args (tuple): Packed arguments passed to `row_prep_func_nb`.
Defaults to `()`.
segment_prep_func_nb (callable): Segment preparation function.
segment_prep_args (tuple): Packed arguments passed to `segment_prep_func_nb`.
Defaults to `()`.
row_wise (bool): Whether to iterate over rows rather than columns/groups.
See `vectorbt.portfolio.nb.simulate_row_wise_nb`.
max_orders (int): Size of the order records array.
Defaults to the number of elements in the broadcasted shape.
Set to a lower number if you run out of memory.
max_logs (int): Size of the log records array.
Defaults to the number of elements in the broadcasted shape.
Set to a lower number if you run out of memory.
seed (int): Seed to be set both for generating `call_seq` and at the beginning of the simulation.
group_by (any): Group columns. See `vectorbt.base.column_grouper.ColumnGrouper`.
broadcast_kwargs (dict): Keyword arguments passed to `vectorbt.base.reshape_fns.broadcast`.
wrapper_kwargs (dict): Keyword arguments passed to `vectorbt.base.array_wrapper.ArrayWrapper`.
freq (any): Index frequency in case `close.index` is not datetime-like.
**kwargs: Keyword arguments passed to the `__init__` method.
For defaults, see `vectorbt.settings.portfolio`.
!!! note
All passed functions should be Numba-compiled.
Objects passed as arguments to these functions (e.g. `*order_args` and the `*_prep_args`) will not be
broadcast to `target_shape`, as their purpose is unknown. You should broadcast manually or use flexible indexing.
Also see notes on `Portfolio.from_orders`.
!!! note
In contrast to other methods, the valuation price is previous `close`
instead of order price, since the price of an order is unknown before call.
You can still set valuation price explicitly in `segment_prep_func_nb`.
## Example
Buy 10 shares each tick:
```python-repl
>>> import pandas as pd
>>> from numba import njit
>>> import vectorbt as vbt
>>> from vectorbt.portfolio.nb import create_order_nb
>>> @njit
... def order_func_nb(oc, size):
... return create_order_nb(size=size, price=oc.close[oc.i, oc.col])
>>> close = pd.Series([1, 2, 3, 4, 5])
>>> portfolio = vbt.Portfolio.from_order_func(close, order_func_nb, 10)
>>> portfolio.shares()
0 10.0
1 20.0
2 30.0
3 40.0
4 40.0
dtype: float64
>>> portfolio.cash()
0 90.0
1 70.0
2 40.0
3 0.0
4 0.0
dtype: float64
```
Reverse each position by first closing it. Keep state of last position to determine
which position to open next (just as an example, there are easier ways to do this):
```python-repl
>>> import numpy as np
>>> @njit
... def group_prep_func_nb(gc):
... last_pos_state = np.array([-1])
... return (last_pos_state,)
>>> @njit
... def order_func_nb(oc, last_pos_state):
... if oc.shares_now > 0:
... size = -oc.shares_now # close long
... elif oc.shares_now < 0:
... size = -oc.shares_now # close short
... else:
... if last_pos_state[0] == 1:
... size = -np.inf # open short
... last_pos_state[0] = -1
... else:
... size = np.inf # open long
... last_pos_state[0] = 1
...
... return create_order_nb(size=size, price=oc.close[oc.i, oc.col])
>>> portfolio = vbt.Portfolio.from_order_func(
... close, order_func_nb, group_prep_func_nb=group_prep_func_nb)
>>> portfolio.shares()
0 100.0
1 0.0
2 -100.0
3 0.0
4 20.0
dtype: float64
>>> portfolio.cash()
0 0.0
1 200.0
2 500.0
3 100.0
4 0.0
dtype: float64
```
Equal-weighted portfolio as in `vectorbt.portfolio.nb.simulate_nb` example:
```python-repl
>>> from vectorbt.portfolio.nb import auto_call_seq_ctx_nb
>>> from vectorbt.portfolio.enums import SizeType, Direction
>>> @njit
... def group_prep_func_nb(gc):
... '''Define empty arrays for each group.'''
... size = np.empty(gc.group_len, dtype=np.float_)
... size_type = np.empty(gc.group_len, dtype=np.int_)
... direction = np.empty(gc.group_len, dtype=np.int_)
... temp_float_arr = np.empty(gc.group_len, dtype=np.float_)
... return size, size_type, direction, temp_float_arr
>>> @njit
... def segment_prep_func_nb(sc, size, size_type, direction, temp_float_arr):
... '''Perform rebalancing at each segment.'''
... for k in range(sc.group_len):
... col = sc.from_col + k
... size[k] = 1 / sc.group_len
... size_type[k] = SizeType.TargetPercent
... direction[k] = Direction.LongOnly
... sc.last_val_price[col] = sc.close[sc.i, col]
... auto_call_seq_ctx_nb(sc, size, size_type, direction, temp_float_arr)
... return size, size_type, direction
>>> @njit
... def order_func_nb(oc, size, size_type, direction, fees, fixed_fees, slippage):
... '''Place an order.'''
... col_i = oc.call_seq_now[oc.call_idx]
... return create_order_nb(
... size=size[col_i],
... size_type=size_type[col_i],
... price=oc.close[oc.i, oc.col],
... fees=fees, fixed_fees=fixed_fees, slippage=slippage,
... direction=direction[col_i]
... )
>>> np.random.seed(42)
>>> close = np.random.uniform(1, 10, size=(5, 3))
>>> fees = 0.001
>>> fixed_fees = 1.
>>> slippage = 0.001
>>> portfolio = vbt.Portfolio.from_order_func(
... close, # acts both as reference and order price here
... order_func_nb, fees, fixed_fees, slippage, # order_args as *args
... active_mask=2, # rebalance every second tick
... group_prep_func_nb=group_prep_func_nb,
... segment_prep_func_nb=segment_prep_func_nb,
... cash_sharing=True, group_by=True, # one group with cash sharing
... )
>>> portfolio.holding_value(group_by=False).vbt.plot()
```

"""
# Get defaults
from vectorbt import settings
if not checks.is_pandas(close):
if not checks.is_array(close):
close = np.asarray(close)
close = pd.Series(close) if close.ndim == 1 else pd.DataFrame(close)
if target_shape is None:
target_shape = close.shape
if init_cash is None:
init_cash = settings.portfolio['init_cash']
init_cash = convert_str_enum_value(InitCashMode, init_cash)
if isinstance(init_cash, int) and init_cash in InitCashMode:
init_cash_mode = init_cash
init_cash = np.inf
else:
init_cash_mode = None
if cash_sharing is None:
cash_sharing = settings.portfolio['cash_sharing']
if call_seq is None:
call_seq = settings.portfolio['call_seq']
call_seq = convert_str_enum_value(CallSeqType, call_seq)
if isinstance(call_seq, int):
if call_seq == CallSeqType.Auto:
raise ValueError("CallSeqType.Auto should be implemented manually. "
"Use auto_call_seq_ctx_nb in segment_prep_func_nb.")
if active_mask is None:
active_mask = True
if row_wise is None:
row_wise = settings.portfolio['row_wise']
if seed is None:
seed = settings.portfolio['seed']
if seed is not None:
set_seed(seed)
if freq is None:
freq = settings.portfolio['freq']
if broadcast_kwargs is None:
broadcast_kwargs = {}
require_kwargs = dict(require_kwargs=dict(requirements='W'))
broadcast_kwargs = merge_dicts(require_kwargs, broadcast_kwargs)
if wrapper_kwargs is None:
wrapper_kwargs = {}
if not wrapper_kwargs.get('group_select', True) and cash_sharing:
raise ValueError("group_select cannot be disabled if cash_sharing=True")
# Broadcast inputs
target_shape_2d = (target_shape[0], target_shape[1] if len(target_shape) > 1 else 1)
if close.shape != target_shape:
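# if close is narrower than the target shape, tile it along columns (labeled by keys) before broadcasting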
if len(close.vbt.wrapper.columns) <= target_shape_2d[1]:
if target_shape_2d[1] % len(close.vbt.wrapper.columns) != 0:
raise ValueError("Cannot broadcast close to target_shape")
if keys is None:
keys = pd.Index(np.arange(target_shape_2d[1]), name='iteration_idx')
tile_times = target_shape_2d[1] // len(close.vbt.wrapper.columns)
close = close.vbt.tile(tile_times, keys=keys)
close = broadcast(close, to_shape=target_shape, **broadcast_kwargs)
wrapper = ArrayWrapper.from_obj(close, freq=freq, group_by=group_by, **wrapper_kwargs)
cs_group_lens = wrapper.grouper.get_group_lens(group_by=None if cash_sharing else False)
init_cash = np.require(np.broadcast_to(init_cash, (len(cs_group_lens),)), dtype=np.float_)
group_lens = wrapper.grouper.get_group_lens(group_by=group_by)
if isinstance(active_mask, int):
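# an integer active_mask of n means: activate every n-th row, starting with the first one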
_active_mask = np.full((target_shape_2d[0], len(group_lens)), False)
_active_mask[0::active_mask] = True
active_mask = _active_mask
else:
active_mask = broadcast(
active_mask,
to_shape=(target_shape_2d[0], len(group_lens)),
to_pd=False,
**require_kwargs
)
if checks.is_array(call_seq):
call_seq = nb.require_call_seq(broadcast(call_seq, to_shape=target_shape_2d, to_pd=False))
else:
call_seq = nb.build_call_seq(target_shape_2d, group_lens, call_seq_type=call_seq)
if max_orders is None:
max_orders = target_shape_2d[0] * target_shape_2d[1]
if max_logs is None:
max_logs = target_shape_2d[0] * target_shape_2d[1]
# Prepare arguments
if prep_func_nb is None:
prep_func_nb = nb.empty_prep_nb
if prep_args is None:
prep_args = ()
if group_prep_func_nb is None:
group_prep_func_nb = nb.empty_prep_nb
if group_prep_args is None:
group_prep_args = ()
if row_prep_func_nb is None:
row_prep_func_nb = nb.empty_prep_nb
if row_prep_args is None:
row_prep_args = ()
if segment_prep_func_nb is None:
segment_prep_func_nb = nb.empty_prep_nb
if segment_prep_args is None:
segment_prep_args = ()
# Perform calculation
if row_wise:
order_records, log_records = nb.simulate_row_wise_nb(
target_shape_2d,
to_2d(close, raw=True),
group_lens,
init_cash,
cash_sharing,
call_seq,
active_mask,
prep_func_nb,
prep_args,
row_prep_func_nb,
row_prep_args,
segment_prep_func_nb,
segment_prep_args,
order_func_nb,
order_args,
max_orders,
max_logs
)
else:
order_records, log_records = nb.simulate_nb(
target_shape_2d,
to_2d(close, raw=True),
group_lens,
init_cash,
cash_sharing,
call_seq,
active_mask,
prep_func_nb,
prep_args,
group_prep_func_nb,
group_prep_args,
segment_prep_func_nb,
segment_prep_args,
order_func_nb,
order_args,
max_orders,
max_logs
)
# Create an instance
return cls(
wrapper,
close,
order_records,
log_records,
init_cash if init_cash_mode is None else init_cash_mode,
cash_sharing,
call_seq,
**kwargs
)
# ############# Properties ############# #
@property
def wrapper(self):
"""Array wrapper."""
if self.cash_sharing:
# Allow only disabling grouping when needed (but not globally, see regroup)
return self._wrapper.copy(
allow_enable=False,
allow_modify=False
)
return self._wrapper
def regroup(self, group_by, **kwargs):
"""Regroup this object.
See `vectorbt.base.array_wrapper.Wrapping.regroup`."""
if self.cash_sharing:
if self.wrapper.grouper.is_grouping_modified(group_by=group_by):
raise ValueError("Cannot modify grouping globally when cash_sharing=True")
return Wrapping.regroup(self, group_by, **kwargs)
@property
def cash_sharing(self):
"""Whether to share cash within the same group."""
return self._cash_sharing
@property
def call_seq(self, wrap_kwargs=None):
"""Sequence of calls per row and group."""
return self.wrapper.wrap(self._call_seq, group_by=False, **merge_dicts({}, wrap_kwargs))
@property
def incl_unrealized(self):
"""Whether to include unrealized trade P&L in statistics."""
return self._incl_unrealized
@property
def use_filled_close(self):
"""Whether to forward-backward fill NaN values in `Portfolio.close`."""
return self._use_filled_close
# ############# Reference price ############# #
@property
def close(self):
"""Price per share series."""
return self._close
@cached_method
def fill_close(self, ffill=True, bfill=True, wrap_kwargs=None):
"""Fill NaN values of `Portfolio.close`.
Use `ffill` and `bfill` to fill forwards and backwards respectively."""
close = to_2d(self.close, raw=True)
if ffill and np.any(np.isnan(close[-1, :])):
close = generic_nb.ffill_nb(close)
if bfill and np.any(np.isnan(close[0, :])):
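# backward fill = forward fill applied to the reversed array, then reversed back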
close = generic_nb.ffill_nb(close[::-1, :])[::-1, :]
return self.wrapper.wrap(close, group_by=False, **merge_dicts({}, wrap_kwargs))
# ############# Records ############# #
@property
def order_records(self):
"""A structured NumPy array of order records."""
return self._order_records
@cached_property
def orders(self):
"""`Portfolio.get_orders` with default arguments."""
return Orders(self.wrapper, self.order_records, self.close)
def get_orders(self, group_by=None):
"""Get order records.
See `vectorbt.portfolio.orders.Orders`."""
return self.orders.regroup(group_by=group_by)
@property
def log_records(self):
"""A structured NumPy array of log records."""
return self._log_records
@cached_property
def logs(self):
"""`Portfolio.get_logs` with default arguments."""
return Logs(self.wrapper, self.log_records)
def get_logs(self, group_by=None):
"""Get log records.
See `vectorbt.portfolio.logs.Logs`."""
return self.logs.regroup(group_by=group_by)
@cached_property
def trades(self):
"""`Portfolio.get_trades` with default arguments."""
return Trades.from_orders(self.orders)
def get_trades(self, group_by=None):
"""Get trade records.
See `vectorbt.portfolio.trades.Trades`."""
return self.trades.regroup(group_by=group_by)
@cached_property
def positions(self):
"""`Portfolio.get_positions` with default arguments."""
return Positions.from_trades(self.trades)
def get_positions(self, group_by=None):
"""Get position records.
See `vectorbt.portfolio.trades.Positions`."""
return self.positions.regroup(group_by=group_by)
@cached_property
def drawdowns(self):
"""`Portfolio.get_drawdowns` with default arguments."""
return self.get_drawdowns()
@cached_method
def get_drawdowns(self, **kwargs):
"""Get drawdown records from `Portfolio.value`.
See `vectorbt.generic.drawdowns.Drawdowns`.
`**kwargs` are passed to `Portfolio.value`."""
return Drawdowns.from_ts(self.value(**kwargs), freq=self.wrapper.freq)
# ############# Shares ############# #
@cached_method
def share_flow(self, direction='all', wrap_kwargs=None):
"""Get share flow series per column."""
direction = convert_str_enum_value(Direction, direction)
share_flow = nb.share_flow_nb(
self.wrapper.shape_2d,
self.orders.values,
self.orders.col_mapper.col_map,
direction
)
return self.wrapper.wrap(share_flow, group_by=False, **merge_dicts({}, wrap_kwargs))
@cached_method
def shares(self, direction='all', wrap_kwargs=None):
"""Get share series per column."""
direction = convert_str_enum_value(Direction, direction)
share_flow = to_2d(self.share_flow(direction='all'), raw=True)
shares = nb.shares_nb(share_flow)
if direction == Direction.LongOnly:
shares = np.where(shares > 0, shares, 0.)
if direction == Direction.ShortOnly:
shares = np.where(shares < 0, -shares, 0.)
return self.wrapper.wrap(shares, group_by=False, **merge_dicts({}, wrap_kwargs))
@cached_method
def pos_mask(self, direction='all', group_by=None, wrap_kwargs=None):
"""Get position mask per column/group."""
direction = convert_str_enum_value(Direction, direction)
shares = to_2d(self.shares(direction=direction), raw=True)
if self.wrapper.grouper.is_grouped(group_by=group_by):
pos_mask = to_2d(self.pos_mask(direction=direction, group_by=False), raw=True)
group_lens = self.wrapper.grouper.get_group_lens(group_by=group_by)
pos_mask = nb.pos_mask_grouped_nb(pos_mask, group_lens)
else:
pos_mask = shares != 0
return self.wrapper.wrap(pos_mask, group_by=group_by, **merge_dicts({}, wrap_kwargs))
@cached_method
def pos_coverage(self, direction='all', group_by=None, wrap_kwargs=None):
"""Get position coverage per column/group."""
direction = convert_str_enum_value(Direction, direction)
shares = to_2d(self.shares(direction=direction), raw=True)
if self.wrapper.grouper.is_grouped(group_by=group_by):
pos_mask = to_2d(self.pos_mask(direction=direction, group_by=False), raw=True)
group_lens = self.wrapper.grouper.get_group_lens(group_by=group_by)
pos_coverage = nb.pos_coverage_grouped_nb(pos_mask, group_lens)
else:
pos_coverage = np.mean(shares != 0, axis=0)
wrap_kwargs = merge_dicts(dict(name_or_index='pos_coverage'), wrap_kwargs)
return self.wrapper.wrap_reduced(pos_coverage, group_by=group_by, **wrap_kwargs)
# ############# Cash ############# #
@cached_method
def cash_flow(self, group_by=None, short_cash=True, wrap_kwargs=None):
"""Get cash flow series per column/group.
When `short_cash` is set to False, cash never goes above the initial level,
because an operation always costs money."""
if self.wrapper.grouper.is_grouped(group_by=group_by):
cash_flow = to_2d(self.cash_flow(group_by=False), raw=True)
group_lens = self.wrapper.grouper.get_group_lens(group_by=group_by)
cash_flow = nb.cash_flow_grouped_nb(cash_flow, group_lens)
else:
cash_flow = nb.cash_flow_nb(
self.wrapper.shape_2d,
self.orders.values,
self.orders.col_mapper.col_map,
short_cash
)
return self.wrapper.wrap(cash_flow, group_by=group_by, **merge_dicts({}, wrap_kwargs))
@cached_property
def init_cash(self):
"""`Portfolio.get_init_cash` with default arguments."""
return self.get_init_cash()
@cached_method
def get_init_cash(self, group_by=None, wrap_kwargs=None):
"""Initial amount of cash per column/group with default arguments.
!!! note
If initial cash is found automatically and no own cash is used throughout simulation
(for example, when shorting), initial cash will be set to 1 instead of 0 to
enable smooth calculation of returns."""
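# An integer _init_cash encodes an InitCashMode: derive initial cash from the most negative cumulative cash flow.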
if isinstance(self._init_cash, int):
cash_flow = to_2d(self.cash_flow(group_by=group_by), raw=True)
cash_min = np.min(np.cumsum(cash_flow, axis=0), axis=0)
init_cash = np.where(cash_min < 0, np.abs(cash_min), 1.)
if self._init_cash == InitCashMode.AutoAlign:
init_cash = np.full(init_cash.shape, np.max(init_cash))
else:
init_cash = to_1d(self._init_cash, raw=True)
if self.wrapper.grouper.is_grouped(group_by=group_by):
group_lens = self.wrapper.grouper.get_group_lens(group_by=group_by)
init_cash = nb.init_cash_grouped_nb(init_cash, group_lens, self.cash_sharing)
else:
group_lens = self.wrapper.grouper.get_group_lens()
init_cash = nb.init_cash_nb(init_cash, group_lens, self.cash_sharing)
wrap_kwargs = merge_dicts(dict(name_or_index='init_cash'), wrap_kwargs)
return self.wrapper.wrap_reduced(init_cash, group_by=group_by, **wrap_kwargs)
@cached_method
def cash(self, group_by=None, in_sim_order=False, short_cash=True, wrap_kwargs=None):
"""Get cash balance series per column/group."""
if in_sim_order and not self.cash_sharing:
raise ValueError("Cash sharing must be enabled for in_sim_order=True")
cash_flow = to_2d(self.cash_flow(group_by=group_by, short_cash=short_cash), raw=True)
if self.wrapper.grouper.is_grouped(group_by=group_by):
group_lens = self.wrapper.grouper.get_group_lens(group_by=group_by)
init_cash = to_1d(self.get_init_cash(group_by=group_by), raw=True)
cash = nb.cash_grouped_nb(
self.wrapper.shape_2d,
cash_flow,
group_lens,
init_cash
)
else:
group_lens = self.wrapper.grouper.get_group_lens()
if self.wrapper.grouper.is_grouping_disabled(group_by=group_by) and in_sim_order:
init_cash = to_1d(self.init_cash, raw=True)
call_seq = to_2d(self.call_seq, raw=True)
cash = nb.cash_in_sim_order_nb(cash_flow, group_lens, init_cash, call_seq)
else:
init_cash = to_1d(self.get_init_cash(group_by=False), raw=True)
cash = nb.cash_nb(cash_flow, group_lens, init_cash)
return self.wrapper.wrap(cash, group_by=group_by, **merge_dicts({}, wrap_kwargs))
# ############# Performance ############# #
@cached_method
def holding_value(self, direction='all', group_by=None, wrap_kwargs=None):
"""Get holding value series per column/group."""
direction = convert_str_enum_value(Direction, direction)
close = to_2d(self.close, raw=True).copy()
shares = to_2d(self.shares(direction=direction), raw=True)
close[shares == 0] = 0. # for price being NaN
if self.wrapper.grouper.is_grouped(group_by=group_by):
holding_value = to_2d(self.holding_value(direction=direction, group_by=False), raw=True)
group_lens = self.wrapper.grouper.get_group_lens(group_by=group_by)
holding_value = nb.holding_value_grouped_nb(holding_value, group_lens)
else:
holding_value = nb.holding_value_nb(close, shares)
return self.wrapper.wrap(holding_value, group_by=group_by, **merge_dicts({}, wrap_kwargs))
@cached_method
def gross_exposure(self, direction='all', group_by=None, wrap_kwargs=None):
"""Get gross exposure."""
holding_value = to_2d(self.holding_value(group_by=group_by, direction=direction), raw=True)
cash = to_2d(self.cash(group_by=group_by, short_cash=False), raw=True)
gross_exposure = nb.gross_exposure_nb(holding_value, cash)
return self.wrapper.wrap(gross_exposure, group_by=group_by, **merge_dicts({}, wrap_kwargs))
@cached_method
def net_exposure(self, group_by=None, wrap_kwargs=None):
"""Get net exposure."""
long_exposure = to_2d(self.gross_exposure(direction='longonly', group_by=group_by), raw=True)
short_exposure = to_2d(self.gross_exposure(direction='shortonly', group_by=group_by), raw=True)
net_exposure = long_exposure - short_exposure
return self.wrapper.wrap(net_exposure, group_by=group_by, **merge_dicts({}, wrap_kwargs))
@cached_method
def value(self, group_by=None, in_sim_order=False, wrap_kwargs=None):
"""Get portfolio value series per column/group.
By default, will generate portfolio value for each asset based on cash flows and thus
independent from other assets, with initial cash and shares being that of the entire group.
Useful for generating returns and comparing assets within the same group.
When `group_by` is False and `in_sim_order` is True, returns value generated in
simulation order (see [row-major order](https://en.wikipedia.org/wiki/Row-_and_column-major_order)).
This value cannot be used for generating returns as-is. Useful to analyze how value
evolved throughout simulation."""
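# Note: in_sim_order only takes effect when grouping is disabled (group_by=False), see the branch below.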
cash = to_2d(self.cash(group_by=group_by, in_sim_order=in_sim_order), raw=True)
holding_value = to_2d(self.holding_value(group_by=group_by), raw=True)
if self.wrapper.grouper.is_grouping_disabled(group_by=group_by) and in_sim_order:
group_lens = self.wrapper.grouper.get_group_lens()
call_seq = to_2d(self.call_seq, raw=True)
value = nb.value_in_sim_order_nb(cash, holding_value, group_lens, call_seq)
# price of NaN is already addressed by ungrouped_value_nb
else:
value = nb.value_nb(cash, holding_value)
return self.wrapper.wrap(value, group_by=group_by, **merge_dicts({}, wrap_kwargs))
@cached_method
def total_profit(self, group_by=None, wrap_kwargs=None):
"""Get total profit per column/group.
Calculated directly from order records (fast).
Uses filled close if `Portfolio.use_filled_close` is True."""
if self.wrapper.grouper.is_grouped(group_by=group_by):
total_profit = to_1d(self.total_profit(group_by=False), raw=True)
group_lens = self.wrapper.grouper.get_group_lens(group_by=group_by)
total_profit = nb.total_profit_grouped_nb(
total_profit,
group_lens
)
else:
if self.use_filled_close:
close = to_2d(self.fill_close(), raw=True)
else:
close = to_2d(self.close, raw=True)
total_profit = nb.total_profit_nb(
self.wrapper.shape_2d,
close,
self.orders.values,
self.orders.col_mapper.col_map
)
wrap_kwargs = merge_dicts(dict(name_or_index='total_profit'), wrap_kwargs)
return self.wrapper.wrap_reduced(total_profit, group_by=group_by, **wrap_kwargs)
@cached_method
def final_value(self, group_by=None, wrap_kwargs=None):
"""Get total profit per column/group."""
init_cash = to_1d(self.get_init_cash(group_by=group_by), raw=True)
total_profit = to_1d(self.total_profit(group_by=group_by), raw=True)
final_value = nb.final_value_nb(total_profit, init_cash)
wrap_kwargs = merge_dicts(dict(name_or_index='final_value'), wrap_kwargs)
return self.wrapper.wrap_reduced(final_value, group_by=group_by, **wrap_kwargs)
@cached_method
def total_return(self, group_by=None, wrap_kwargs=None):
"""Get total profit per column/group."""
init_cash = to_1d(self.get_init_cash(group_by=group_by), raw=True)
total_profit = to_1d(self.total_profit(group_by=group_by), raw=True)
total_return = nb.total_return_nb(total_profit, init_cash)
wrap_kwargs = merge_dicts(dict(name_or_index='total_return'), wrap_kwargs)
return self.wrapper.wrap_reduced(total_return, group_by=group_by, **wrap_kwargs)
@cached_method
def returns(self, group_by=None, in_sim_order=False, wrap_kwargs=None):
"""Get return series per column/group based on portfolio value."""
value = to_2d(self.value(group_by=group_by, in_sim_order=in_sim_order), raw=True)
if self.wrapper.grouper.is_grouping_disabled(group_by=group_by) and in_sim_order:
group_lens = self.wrapper.grouper.get_group_lens()
init_cash_grouped = to_1d(self.init_cash, raw=True)
call_seq = to_2d(self.call_seq, raw=True)
returns = nb.returns_in_sim_order_nb(value, group_lens, init_cash_grouped, call_seq)
else:
init_cash = to_1d(self.get_init_cash(group_by=group_by), raw=True)
returns = nb.returns_nb(value, init_cash)
return self.wrapper.wrap(returns, group_by=group_by, **merge_dicts({}, wrap_kwargs))
@cached_method
def active_returns(self, group_by=None, wrap_kwargs=None):
"""Get active return series per column/group.
This type of returns is based solely on cash flows and holding value rather than portfolio value.
It ignores passive cash and thus it will return the same numbers irrespective of the amount of
cash currently available, even `np.inf`. The scale of returns is comparable to that of going
all in and keeping available cash at zero."""
cash_flow = to_2d(self.cash_flow(group_by=group_by), raw=True)
holding_value = to_2d(self.holding_value(group_by=group_by), raw=True)
active_returns = nb.active_returns_nb(cash_flow, holding_value)
return self.wrapper.wrap(active_returns, group_by=group_by, **merge_dicts({}, wrap_kwargs))
@cached_method
def market_value(self, group_by=None, wrap_kwargs=None):
"""Get market (benchmark) value series per column/group.
If grouped, evenly distributes initial cash among assets in the group.
Uses filled close if `Portfolio.use_filled_close` is True.
!!! note
Does not take into account fees and slippage. For this, create a separate portfolio."""
if self.use_filled_close:
close = to_2d(self.fill_close(), raw=True)
else:
close = to_2d(self.close, raw=True)
if self.wrapper.grouper.is_grouped(group_by=group_by):
group_lens = self.wrapper.grouper.get_group_lens(group_by=group_by)
init_cash_grouped = to_1d(self.get_init_cash(group_by=group_by), raw=True)
market_value = nb.market_value_grouped_nb(close, group_lens, init_cash_grouped)
else:
init_cash = to_1d(self.get_init_cash(group_by=False), raw=True)
market_value = nb.market_value_nb(close, init_cash)
return self.wrapper.wrap(market_value, group_by=group_by, **merge_dicts({}, wrap_kwargs))
@cached_method
def market_returns(self, group_by=None, wrap_kwargs=None):
"""Get return series per column/group based on market (benchmark) value."""
market_value = to_2d(self.market_value(group_by=group_by), raw=True)
init_cash = to_1d(self.get_init_cash(group_by=group_by), raw=True)
market_returns = nb.returns_nb(market_value, init_cash)
return self.wrapper.wrap(market_returns, group_by=group_by, **merge_dicts({}, wrap_kwargs))
@cached_method
def total_market_return(self, group_by=None, wrap_kwargs=None):
"""Get total market (benchmark) return."""
market_value = to_2d(self.market_value(group_by=group_by), raw=True)
total_market_return = nb.total_market_return_nb(market_value)
wrap_kwargs = merge_dicts(dict(name_or_index='total_market_return'), wrap_kwargs)
return self.wrapper.wrap_reduced(total_market_return, group_by=group_by, **wrap_kwargs)
@cached_method
def stats(self, column=None, group_by=None, incl_unrealized=None, active_returns=False,
in_sim_order=False, agg_func=_mean_agg_func, wrap_kwargs=None, **kwargs):
"""Compute various statistics on this portfolio.
`kwargs` will be passed to each `vectorbt.returns.accessors.ReturnsAccessor` method.
Can either return aggregated statistics by reducing metrics of all columns with
`agg_func` (mean by default) or return statistics for a single column if `column`
was specified or portfolio contains only one column of data. To display rich data types
such as durations correctly, use an aggregation function that can be applied on `pd.Series`.
!!! note
Use `column` only if caching is enabled, otherwise it may re-compute the same
objects multiple times."""
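# Example usage (illustrative): portfolio.stats(agg_func=None) returns the full per-column/per-group DataFrame.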
if self.wrapper.freq is None:
raise ValueError("Couldn't parse the frequency of index. You must set `freq`.")
# Pre-calculate
trades = self.get_trades(group_by=group_by)
if incl_unrealized is None:
incl_unrealized = self.incl_unrealized
if not incl_unrealized:
trades = trades.closed
drawdowns = self.get_drawdowns(group_by=group_by)
if active_returns:
returns = self.active_returns(group_by=group_by)
else:
returns = self.returns(group_by=group_by, in_sim_order=in_sim_order)
# Run stats
stats_df = pd.DataFrame({
'Start': self.wrapper.index[0],
'End': self.wrapper.index[-1],
'Duration': self.wrapper.shape[0] * self.wrapper.freq,
'Init. Cash': self.get_init_cash(group_by=group_by),
'Total Profit': self.total_profit(group_by=group_by),
'Total Return [%]': self.total_return(group_by=group_by) * 100,
'Benchmark Return [%]': self.total_market_return(group_by=group_by) * 100,
'Position Coverage [%]': self.pos_coverage(group_by=group_by) * 100,
'Max. Drawdown [%]': -drawdowns.max_drawdown() * 100,
'Avg. Drawdown [%]': -drawdowns.avg_drawdown() * 100,
'Max. Drawdown Duration': drawdowns.max_duration(),
'Avg. Drawdown Duration': drawdowns.avg_duration(),
'Num. Trades': trades.count(),
'Win Rate [%]': trades.win_rate() * 100,
'Best Trade [%]': trades.returns.max() * 100,
'Worst Trade [%]': trades.returns.min() * 100,
'Avg. Trade [%]': trades.returns.mean() * 100,
'Max. Trade Duration': trades.duration.max(wrap_kwargs=dict(time_units=True)),
'Avg. Trade Duration': trades.duration.mean(wrap_kwargs=dict(time_units=True)),
'Expectancy': trades.expectancy(),
'SQN': trades.sqn(),
'Gross Exposure': self.gross_exposure(group_by=group_by).mean(),
'Sharpe Ratio': self.sharpe_ratio(reuse_returns=returns, **kwargs),
'Sortino Ratio': self.sortino_ratio(reuse_returns=returns, **kwargs),
'Calmar Ratio': self.calmar_ratio(reuse_returns=returns, **kwargs)
}, index=self.wrapper.grouper.get_columns(group_by=group_by))
# Select columns or reduce
if self.wrapper.get_ndim(group_by=group_by) == 1:
wrap_kwargs = merge_dicts(dict(name_or_index=stats_df.columns), wrap_kwargs)
return self.wrapper.wrap_reduced(stats_df.iloc[0], group_by=group_by, **wrap_kwargs)
if column is not None:
return stats_df.loc[column]
if agg_func is not None:
if agg_func == _mean_agg_func:
warnings.warn("Taking mean across columns. To return a DataFrame, pass agg_func=None.", stacklevel=2)
func_name = 'stats_mean'
else:
func_name = 'stats_' + agg_func.__name__
agg_stats_sr = pd.Series(index=stats_df.columns, name=func_name)
agg_stats_sr.iloc[:3] = stats_df.iloc[0, :3]
agg_stats_sr.iloc[3:] = agg_func(stats_df.iloc[:, 3:])
return agg_stats_sr
return stats_df
def returns_stats(self, column=None, group_by=None, active_returns=False, in_sim_order=False,
agg_func=_mean_agg_func, year_freq=None, **kwargs):
"""Compute various statistics on returns of this portfolio.
For keyword arguments and notes, see `Portfolio.stats`.
`kwargs` will be passed to `vectorbt.returns.accessors.ReturnsAccessor.stats` method.
If `benchmark_rets` is not set, uses `Portfolio.market_returns`."""
# Pre-calculate
if active_returns:
returns = self.active_returns(group_by=group_by)
else:
returns = self.returns(group_by=group_by, in_sim_order=in_sim_order)
# Run stats
if 'benchmark_rets' not in kwargs:
kwargs['benchmark_rets'] = self.market_returns(group_by=group_by)
stats_obj = returns.vbt.returns(freq=self.wrapper.freq, year_freq=year_freq).stats(**kwargs)
# Select columns or reduce
if checks.is_series(stats_obj):
return stats_obj
if column is not None:
return stats_obj.loc[column]
if agg_func is not None:
if agg_func == _mean_agg_func:
warnings.warn("Taking mean across columns. To return a DataFrame, pass agg_func=None.", stacklevel=2)
func_name = 'stats_mean'
else:
func_name = 'stats_' + agg_func.__name__
agg_stats_sr = pd.Series(index=stats_obj.columns, name=func_name)  # api: pandas.Series
import unittest
import numpy as np
import pandas as pd
from pyalink.alink import *
class TestSqlQuery(unittest.TestCase):
def test_batch(self):
source = CsvSourceBatchOp() \
.setSchemaStr(
"sepal_length double, sepal_width double, petal_length double, petal_width double, category string") \
.setFilePath("https://alink-test-data.oss-cn-hangzhou.aliyuncs.com/iris.csv")
source.registerTableName("A")
result = BatchOperator.sqlQuery("SELECT sepal_length FROM A")
result.print()
def test_batch2(self):
data = np.array([
["1", 1, 1.1, 1.0, True],
["2", -2, 0.9, 2.0, False],
["3", 100, -0.01, 3.0, True],
["4", -99, None, 4.0, False],
["5", 1, 1.1, 5.0, True],
["6", -2, 0.9, 6.0, False]
])
df = pd.DataFrame({"f1": data[:, 0], "f2": data[:, 1], "f3": data[:, 2], "f4": data[:, 3], "f5": data[:, 4]})
data = dataframeToOperator(df, schemaStr='f1 string, f2 long, f3 double, f4 double, f5 boolean', opType='batch')
data.print()
data.registerTableName("t1")
data.registerTableName("t2")
res = BatchOperator.sqlQuery("select a.f1,b.f2 from t1 as a join t2 as b on a.f1=b.f1")
res.print()
def test_batch3(self):
data = np.array([
["1", 1, 1.1, 1.0, True],
["2", -2, 0.9, 2.0, False],
["3", 100, -0.01, 3.0, True],
["4", -99, None, 4.0, False],
["5", 1, 1.1, 5.0, True],
["6", -2, 0.9, 6.0, False]
])
df = pd.DataFrame({"f1": data[:, 0], "f2": data[:, 1], "f3": data[:, 2], "f4": data[:, 3], "f5": data[:, 4]})
data = dataframeToOperator(df, schemaStr='f1 string, f2 long, f3 double, f4 double, f5 boolean', opType='batch')
data.print()
data.registerTableName("select")
data.registerTableName("t2")
res = BatchOperator.sqlQuery("select a.f1,b.f2 from `select` as a join t2 as b on a.f1=b.f1")
res.print()
def test_stream(self):
source = CsvSourceStreamOp() \
.setSchemaStr(
"sepal_length double, sepal_width double, petal_length double, petal_width double, category string") \
.setFilePath("https://alink-test-data.oss-cn-hangzhou.aliyuncs.com/iris.csv")
source.registerTableName("A")
result = source.sqlQuery("SELECT sepal_length FROM A")
result.print()
StreamOperator.execute()
def test_stream2(self):
data = np.array([
["1", 1, 1.1, 1.0, True],
["2", -2, 0.9, 2.0, False],
["3", 100, -0.01, 3.0, True],
["4", -99, None, 4.0, False],
["5", 1, 1.1, 5.0, True],
["6", -2, 0.9, 6.0, False]
])
df = pd.DataFrame({"f1": data[:, 0], "f2": data[:, 1], "f3": data[:, 2], "f4": data[:, 3], "f5": data[:, 4]})  # api: pandas.DataFrame
#!/usr/bin/env python3
"""Artistools - NLTE population related functions."""
import math
import multiprocessing
# import os
import re
# import sys
from functools import lru_cache
from functools import partial
from pathlib import Path
# from itertools import chain
# import matplotlib.pyplot as plt
# import matplotlib.ticker as ticker
# import numpy as np
import pandas as pd
from astropy import constants as const
# import numpy as np
# import matplotlib as mpl
import artistools as at
def texifyterm(strterm):
"""Replace a term string with TeX notation equivalent."""
strtermtex = ''
passed_term_Lchar = False
for termpiece in re.split('([_A-Za-z])', strterm):
if re.match('[0-9]', termpiece) is not None and not passed_term_Lchar:
# 2S + 1 number
strtermtex += r'$^{' + termpiece + r'}$'
elif re.match('[A-Z]', termpiece) is not None:
# L character - SPDFGH...
strtermtex += termpiece
passed_term_Lchar = True
elif re.match('[eo]', termpiece) is not None and passed_term_Lchar:
# odd flag, but don't want to confuse it with the energy index (e.g. o4Fo[2])
strtermtex += r'$^{\rm ' + termpiece + r'}$'
elif re.match(r'[0-9]?.*\]', termpiece) is not None:
# J value
strtermtex += termpiece.split('[')[0] + r'$_{' + termpiece.lstrip('0123456789').strip('[]') + r'}$'
elif re.match('[0-9]', termpiece) is not None and passed_term_Lchar:
# extra number after S char
strtermtex += termpiece
strtermtex = strtermtex.replace('$$', '')
return strtermtex
def texifyconfiguration(levelname):
"""Replace a level configuration with the formatted LaTeX equivalent."""
# the underscore gets confused with LaTeX subscript operator, so switch it to the hash symbol
strout = '#'.join(levelname.split('_')[:-1]) + '#'
for strorbitalocc in re.findall(r'[0-9][a-z][0-9]?[#(]', strout):
n, lchar, occ = re.split('([a-z])', strorbitalocc)
lastchar = '(' if occ.endswith('(') else '#'
occ = occ.rstrip('#(')
strorbitalocctex = n + lchar + (r'$^{' + occ + r'}$' if occ else '') + lastchar
strout = strout.replace(strorbitalocc, strorbitalocctex)
for parentterm in re.findall(r'\([0-9][A-Z][^)]?\)', strout):
parentermtex = f'({texifyterm(parentterm.strip("()"))})'
strout = strout.replace(parentterm, parentermtex)
strterm = levelname.split('_')[-1]
strout += ' ' + texifyterm(strterm)
strout = strout.replace('#', '')
strout = strout.replace('$$', '')
# print(f"Replacing levelname '{levelname}' with '{strout}'")
return strout
def add_lte_pops(modelpath, dfpop, columntemperature_tuples, noprint=False, maxlevel=-1):
"""Add columns to dfpop with LTE populations.
columntemperature_tuples is a sequence of tuples of column name and temperature, e.g., ('mycolumn', 3000)
"""
k_b = const.k_B.to('eV / K').value
for _, row in dfpop.drop_duplicates(['modelgridindex', 'timestep', 'Z', 'ion_stage']).iterrows():
modelgridindex = int(row.modelgridindex)
timestep = int(row.timestep)
Z = int(row.Z)
ion_stage = int(row.ion_stage)
ionlevels = at.atomic.get_levels(modelpath).query('Z == @Z and ion_stage == @ion_stage').iloc[0].levels
gs_g = ionlevels.iloc[0].g
gs_energy = ionlevels.iloc[0].energy_ev
gs_pop = dfpop.query(
'modelgridindex == @modelgridindex and timestep == @timestep '
'and Z == @Z and ion_stage == @ion_stage and level == 0').iloc[0]['n_NLTE']
masksuperlevel = (
(dfpop['modelgridindex'] == modelgridindex) & (dfpop['timestep'] == timestep)
& (dfpop['Z'] == Z) & (dfpop['ion_stage'] == ion_stage) & (dfpop['level'] == -1))
masknotsuperlevel = (
(dfpop['modelgridindex'] == modelgridindex) & (dfpop['timestep'] == timestep)
& (dfpop['Z'] == Z) & (dfpop['ion_stage'] == ion_stage) & (dfpop['level'] != -1))
def f_ltepop(x, T_exc, gsg, gse, ionlevels):
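# Boltzmann factor relative to the ground state: (g / g_0) * exp(-(E - E_0) / (k_B * T_exc))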
return (ionlevels.iloc[int(x.level)].g / gsg
* math.exp(- (ionlevels.iloc[int(x.level)].energy_ev - gse) / k_b / T_exc))
for columnname, T_exc in columntemperature_tuples:
dfpop.loc[masknotsuperlevel, columnname] = dfpop.loc[masknotsuperlevel].apply(
f_ltepop, args=(T_exc, gs_g, gs_energy, ionlevels), axis=1)
if not dfpop[masksuperlevel].empty:
levelnumber_sl = dfpop.query(
'modelgridindex == @modelgridindex and timestep == @timestep '
'and Z == @Z and ion_stage == @ion_stage').level.max() + 1
if maxlevel < 0 or levelnumber_sl <= maxlevel:
if not noprint:
print(f'{at.elsymbols[Z]} {at.roman_numerals[ion_stage]} '
f'has a superlevel at level {levelnumber_sl}')
for columnname, T_exc in columntemperature_tuples:
dfpop.loc[masksuperlevel, columnname] = ionlevels.iloc[levelnumber_sl:].eval(
'g / @gs_g * exp(- (energy_ev - @gs_energy) / @k_b / @T_exc)').sum()
dfpop.loc[masksuperlevel, 'level'] = levelnumber_sl + 2
return dfpop
@at.diskcache(savegzipped=True)
def read_file(nltefilepath):
"""Read NLTE populations from one file."""
if not nltefilepath.is_file():
nltefilepathgz = Path(str(nltefilepath) + '.gz')
nltefilepathxz = Path(str(nltefilepath) + '.xz')
if nltefilepathxz.is_file():
nltefilepath = nltefilepathxz
elif nltefilepathgz.is_file():
nltefilepath = nltefilepathgz
else:
# if the first file is not found in the folder, then skip the folder
print(f'Warning: Could not find {nltefilepath}')
return pd.DataFrame()
filesize = Path(nltefilepath).stat().st_size / 1024 / 1024
print(f'Reading {nltefilepath} ({filesize:.2f} MiB)')
try:
dfpop = pd.read_csv(nltefilepath, delim_whitespace=True)
except pd.errors.EmptyDataError:
return pd.DataFrame()  # api: pandas.DataFrame
"""
Extract the features of the ego-noise data.
"""
import os
import shutil
from sklearn.model_selection import train_test_split
import pandas as pd
import numpy as np
import librosa
import matplotlib.pyplot as plt
from aircraft_detector.utils.utils import (
retrieve_files,
get_feature_directory_name,
refresh_directory,
print_verbose,
load_spectrum_settings,
)
import aircraft_detector.utils.feature_helper as fh
import aircraft_detector.utils.plot_helper as ph
class FeatureExtraction:
def __init__(self, root_directory, feature_settings=None):
# set root directory
self._dir_root = root_directory
# set the missing feature settings to their defaults
if feature_settings is None:
feature_settings = {}
self._feature = load_spectrum_settings(feature_settings)
# derive root output directory (feature dataset) from parameters
self._dir_root_set = os.path.join(
self._dir_root,
"Ego-Noise Prediction",
"Parameter Sets",
get_feature_directory_name(self._feature),
)
# verbosity
self.verbose = True # print when method is finished
self.super_verbose = False # print for every single file
def split_mav_data(self, train_test_ratio=0.8, train_val_ratio=0.8):
# get files
dir_audio = os.path.join(self._dir_root, "Raw", "Mav", "Audio")
files_audio = [
os.path.join(dir_audio, f) for f in sorted(os.listdir(dir_audio))
]
dir_states = os.path.join(self._dir_root, "Raw", "Mav", "States")
files_states = [
os.path.join(dir_states, f) for f in sorted(os.listdir(dir_states))
]
# split files into train-val-test
audio_train, audio_test, states_train, states_test = train_test_split(
files_audio, files_states, train_size=train_test_ratio, random_state=42
)
audio_train, audio_val, states_train, states_val = train_test_split(
audio_train, states_train, train_size=train_val_ratio, random_state=42
)
# Group the files
files_train = [audio_train, states_train]
files_val = [audio_val, states_val]
files_test = [audio_test, states_test]
files = [files_train, files_val, files_test]
# Output directory for the split
dir_root_out = os.path.join(self._dir_root, "Ego-Noise Prediction", "Dataset")
# Loop over subsets and data types
for i, subset in enumerate(["Train", "Val", "Test"]):
for j, data in enumerate(["Audio", "States"]):
# Output directory for subset, data
dir_dest = os.path.join(dir_root_out, subset, data)
refresh_directory(dir_dest)
# Copy to destination
for f in files[i][j]:
shutil.copy(f, dir_dest)
def extract_spectra(self, offset=50, scaling=80):
# Loop over subsets
for subset in ["Train", "Val", "Test"]:
# Get audio files
dir_audio = os.path.join(
self._dir_root, "Ego-Noise Prediction", "Dataset", subset, "Audio"
)
files_audio = retrieve_files(dir_audio)
# directory for the unsynchronized spectra
dir_output = os.path.join(
self._dir_root_set, "Unsynchronized", subset, "Spectra"
)
# Refresh directory
refresh_directory(dir_output)
# Loop through files in set
for f in files_audio:
# Extract spectrum
Z = fh.extract_spectrum(f, self._feature)
# Scale spectrum
Z += offset
Z /= scaling
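# with the default offset/scaling, spectrum values in [-50, 30] (presumably dB) are mapped into [0, 1]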
# Save to appropriate directory
fn = os.path.split(f)[-1].replace(".wav", ".csv")
fp = os.path.join(dir_output, fn)
pd.DataFrame(Z).to_csv(fp, index=False, header=False)
print_verbose(
self.super_verbose,
"Finished extracting feature for '%s' set." % subset,
)
def extract_states(self):
# Loop over subsets
for subset in ["Train", "Val", "Test"]:
# Get states files
dir_states = os.path.join(
self._dir_root, "Ego-Noise Prediction", "Dataset", subset, "States"
)
files_states = retrieve_files(dir_states)
# Directory for the unsynchronized states
dir_output = os.path.join(
self._dir_root_set, "Unsynchronized", subset, "States"
)
refresh_directory(dir_output)
# Loop through files in set
for f in files_states: # xyz in NED frame
# Read in as dataframe
df = pd.read_csv(f, header=0)  # api: pandas.read_csv
import os
from collections import defaultdict
from datetime import datetime
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
import requests
from bs4 import BeautifulSoup
from config import *
from extractor import extract
from utils import create_client_engine, validate_df
# from sqlalchemy import create_engine
# from iterator import copy_string_iterator
def create_staging_table():
client, client_engine = create_client_engine()
client.execute(
"""
CREATE UNLOGGED TABLE IF NOT EXISTS staging_{DATAWAREHOUSE_TABLE} (
referrer VARCHAR(500) NOT NULL,
resource VARCHAR(500) NOT NULL,
type VARCHAR(200) NOT NULL,
number_of_occurrences BIGINT NOT NULL,
date DATE NOT NULL,
wiki VARCHAR(200) NOT NULL
);
""".format(
DATAWAREHOUSE_TABLE=DATAWAREHOUSE_TABLE
)
)
client.disconnect()
logger.info("Staging table created.")
def create_tables():
client, client_engine = create_client_engine()
client.execute(
"""
CREATE TABLE IF NOT EXISTS {DATACATALOG_TABLE} (
folder_name VARCHAR(200) NOT NULL,
file_name VARCHAR(200) NOT NULL,
resource_url VARCHAR(200) NOT NULL,
size_in_bytes BIGINT NOT NULL,
data_inserted BOOLEAN NOT NULL,
number_of_rows BIGINT NOT NULL,
CONSTRAINT {DATACATALOG_TABLE}_pk PRIMARY KEY (folder_name, file_name)
)
""".format(
DATACATALOG_TABLE=DATACATALOG_TABLE
)
)
client.execute(
"""
CREATE TABLE IF NOT EXISTS {DATAWAREHOUSE_TABLE} (
referrer VARCHAR(500) NOT NULL,
resource VARCHAR(500) NOT NULL,
type VARCHAR(200) NOT NULL,
number_of_occurrences BIGINT NOT NULL,
date DATE NOT NULL,
wiki VARCHAR(200) NOT NULL
)
""".format(
DATAWAREHOUSE_TABLE=DATAWAREHOUSE_TABLE
)
)
client.disconnect()
logger.info("Tables created.")
def populate_datacatalog():
client, client_engine = create_client_engine()
datacatalog = pd.read_sql_table(DATACATALOG_TABLE, client_engine)
datacatalog_number_of_rows = datacatalog.shape[0]
if datacatalog_number_of_rows == 0:
logger.info("Creating datacatalog.")
html = requests.get(DATA_VENDOR_URL).text
soup = BeautifulSoup(html, "lxml")
parent_links = [
a["href"]
for a in soup.findAll("a")
if a.has_attr("href")
and not (a["href"].startswith("../") or a["href"].startswith("readme.html"))
]
link_catalog = defaultdict(list)
for parent_link in parent_links:
folder_name = parent_link.split("/")[0]
parent_link = DATA_VENDOR_URL + parent_link
html = requests.get(parent_link).text
soup = BeautifulSoup(html, "lxml")
child_links = [
parent_link + a["href"]
for a in soup.findAll("a")
if a.has_attr("href")
and not (
a["href"].startswith("../") or a["href"].startswith("readme.html")
)
]
link_catalog[folder_name].extend(child_links)
link_catalog = dict(link_catalog)
folder_names = []
file_names = []
resource_urls = []
for folder_name, urls in link_catalog.items():
for url in urls:
file_name = url.split("/")[-1]
folder_names.append(folder_name)
resource_urls.append(url)
file_names.append(file_name)
data_catalog = {
"folder_name": folder_names,
"file_name": file_names,
"resource_url": resource_urls,
"size_in_bytes": 0,
"data_inserted": False,
"number_of_rows": 0,
}
data_catalog = pd.DataFrame(
data_catalog,
columns=[
"folder_name",
"file_name",
"resource_url",
"size_in_bytes",
"data_inserted",
"number_of_rows",
],
)
try:
data_catalog.to_sql(
schema="public",
name=DATACATALOG_TABLE,
con=client_engine,
if_exists="append",
index=False,
method="multi",
chunksize=10_000,
)
logger.info("Datacatalog created.")
except Exception as e:
logger.error(e)
else:
logger.info("Datacatalog already exists.")
client.disconnect()
def create_datalake():
client, client_engine = create_client_engine()
folder_names, file_names = FOLDER_NAMES, FILE_NAMES
datacatalog = pd.read_sql_table(DATACATALOG_TABLE, client_engine)
datacatalog_number_of_rows = datacatalog.shape[0]
if datacatalog_number_of_rows > 0:
for index, row in datacatalog.iterrows():
folder_name = row["folder_name"]
file_name = row["file_name"]
resource_url = row["resource_url"]
size_in_bytes = row["size_in_bytes"]
data_inserted = bool(row["data_inserted"])
number_of_rows = row["number_of_rows"]
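# honor the optional FOLDER_NAMES / FILE_NAMES filters (from config): skip entries not selected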
if (
folder_names
and len(folder_names) > 0
and not folder_name in folder_names
) or (file_names and len(file_names) > 0 and not file_name in file_names):
continue
folder_path = os.path.join(DATALAKE_DIR, folder_name)
os.makedirs(folder_path, exist_ok=True)
gz_file_path = os.path.join(DATALAKE_DIR, folder_name, file_name)
file_path = gz_file_path.replace(".gz", "")
if (
os.path.exists(gz_file_path) and not os.path.getsize(gz_file_path) > 0
) or not os.path.exists(gz_file_path):
try:
r = requests.get(resource_url, stream=True)
with open(gz_file_path, "wb") as f:
for chunk in r.raw.stream(1024, decode_content=False):
if chunk:
f.write(chunk)
extract(folder_path, gz_file_path)
# with open(file_path, "wb") as f:
# with requests.get(
# resource_url, allow_redirects=True, stream=True
# ) as resp:
# for chunk in resp.iter_content(chunk_size=1024):
# if chunk:
# f.write(chunk)
# with open(file_path, "wb") as f:
# for chunk in r.iter_content(chunk_size=1024):
# if chunk:
# f.write(chunk)
logger.info(
"Data downloaded into datalake for {}.".format(file_name)
)
except Exception as e:
logger.error(e)
elif (
os.path.exists(gz_file_path)
and os.path.getsize(gz_file_path) > 0
and (
not os.path.exists(file_path) or not os.path.getsize(file_path) > 0
)
):
try:
extract(folder_path, gz_file_path)
logger.info(
"Data extracted into datalake for {}.".format(file_name)
)
except Exception as e:
logger.error(e)
else:
logger.info("Data already exists in datalake for {}.".format(file_name))
size_in_bytes = os.path.getsize(file_path)
client.execute(
"""
UPDATE {DATACATALOG_TABLE}
SET size_in_bytes = {size_in_bytes}
WHERE folder_name = '{folder_name}'
AND resource_url = '{resource_url}'
""".format(
DATACATALOG_TABLE=DATACATALOG_TABLE,
folder_name=folder_name,
size_in_bytes=size_in_bytes,
resource_url=resource_url,
)
)
client.disconnect()
def extract_transform_load():
client, client_engine = create_client_engine()
folder_names, file_names = FOLDER_NAMES, FILE_NAMES
datacatalog = pd.read_sql_table(DATACATALOG_TABLE, client_engine)  # api: pandas.read_sql_table
import pandas as pd
import numpy as np
import os, glob, logging, sys, time
import dev_capacity_calculation_module
NOW = time.strftime("%Y_%m%d_%H%M")
today = time.strftime('%Y_%m_%d')
## set up the directories
if os.getenv('USERNAME') =='ywang':
BOX_DIR = 'C:\\Users\\{}\\Box\\Modeling and Surveys\\Urban Modeling\\Bay Area UrbanSim\\PBA50'.format(os.getenv('USERNAME'))
M_URBANSIM_DIR = 'M:\\Data\\Urban\\BAUS\\PBA50'
M_SMELT_DIR = 'M:\\Data\\GIS layers\\UrbanSim smelt\\2020 03 12'
GITHUB_PETRALE_DIR = 'C:\\Users\\{}\\Documents\\GitHub\\petrale'.format(os.getenv('USERNAME'))
GITHUB_URBANSIM_DIR = 'C:\\Users\\{}\\Documents\\GitHub\\bayarea_urbansim\\data'.format(os.getenv('USERNAME'))
elif os.getenv('USERNAME') =='lzorn':
BOX_DIR = 'C:\\Users\\{}\\Box\\Modeling and Surveys\\Urban Modeling\\Bay Area UrbanSim\\PBA50'.format(os.getenv('USERNAME'))
M_URBANSIM_DIR = 'M:\\Data\\Urban\\BAUS\\PBA50'
M_SMELT_DIR = 'M:\\Data\\GIS layers\\UrbanSim smelt\\2020 03 12'
GITHUB_PETRALE_DIR = 'X:\\petrale'
GITHUB_URBANSIM_DIR = 'X:\\bayarea_urbansim\\data'
# input file locations
PBA40_ZONING_BOX_DIR = os.path.join(M_URBANSIM_DIR, 'Horizon', 'Large General Input Data')
PBA50_ZONINGMOD_DIR = os.path.join(M_URBANSIM_DIR, 'Final_Blueprint', 'Zoning Modifications')
OTHER_INPUTS_DIR = os.path.join(M_URBANSIM_DIR, 'Final_Blueprint', 'Base zoning', 'input')
# output file location
DATA_OUTPUT_DIR = os.path.join(BOX_DIR, 'Policies\\Base zoning\\outputs')
QA_QC_DIR = os.path.join(BOX_DIR, 'Policies\\Base zoning\\outputs\\QAQC')
LOG_FILE = os.path.join(DATA_OUTPUT_DIR,'{}_plu_boc_combine.log'.format(today))
## Three steps of data cleaning:
## - combine PBA40 plu data and BASIS BOC data using p10 parcel geography;
## - assign allowed residential and/or non-residential development to each parcel;
## - impute max_dua and max_far for parcels missing the info
def impute_max_dua(df_original,boc_source):
"""
Impute max_dua from max_far or max_height
Returns dataframe with PARCEL_ID, max_dua, source_dua_[boc_source]
source_dua is one of: [boc_source]: if it's already set so no imputation is necessary
imputed from max_far
imputed from max_height
missing: if it can't be imputed because max_far and max_height are missing too
Note: For parcels that are nodev or where residential development isn't allowed, max_dua isn't important
"""
# don't modify passed df
df = df_original.copy()
logger.info("impute_max_dua_{}: Before imputation, number of parcels with missing max_dua_{}: {:,}".format(
boc_source, boc_source, sum(df['max_dua_'+boc_source].isnull())))
# we can only fill in missing if either max_far or max_height is not null
df['max_dua_from_far'] = \
df['max_far_' +boc_source] * dev_capacity_calculation_module.SQUARE_FEET_PER_ACRE / dev_capacity_calculation_module.SQUARE_FEET_PER_DU
df['max_far_from_height'] = \
df['max_height_' +boc_source] / dev_capacity_calculation_module.FEET_PER_STORY * dev_capacity_calculation_module.PARCEL_USE_EFFICIENCY
df['max_dua_from_height'] = \
df['max_far_from_height'] * dev_capacity_calculation_module.SQUARE_FEET_PER_ACRE / dev_capacity_calculation_module.SQUARE_FEET_PER_DU
# default to missing
df['source_dua_'+boc_source] = 'missing'
# this is set already -- nothing to do
df.loc[(df['max_dua_'+boc_source].notnull()) &
(df['max_dua_'+boc_source] > 0), 'source_dua_'+boc_source] = boc_source
# decide on imputation source
# for missing values, fill from max_far or max_height -- if both are available, use the min unless the min is 0
df.loc[(df['source_dua_'+boc_source]=='missing') &
df.max_dua_from_height.notnull() &
df.max_dua_from_far.notnull() &
(df.max_dua_from_height > df.max_dua_from_far) &
(df.max_dua_from_far > 0), "source_dua_"+boc_source] = 'imputed from max_far (as min)'
df.loc[(df['source_dua_'+boc_source]=='missing') &
df.max_dua_from_height.notnull() &
df.max_dua_from_far.notnull() &
(df.max_dua_from_height > df.max_dua_from_far) &
(df.max_dua_from_far == 0), "source_dua_"+boc_source] = 'imputed from max_height'
df.loc[(df['source_dua_'+boc_source]=='missing') &
df.max_dua_from_height.notnull() &
df.max_dua_from_far.notnull() &
(df.max_dua_from_height < df.max_dua_from_far) &
(df.max_dua_from_height > 0), 'source_dua_'+boc_source] = 'imputed from max_height (as min)'
df.loc[(df['source_dua_'+boc_source]=='missing') &
df.max_dua_from_height.notnull() &
df.max_dua_from_far.notnull() &
(df.max_dua_from_height < df.max_dua_from_far) &
(df.max_dua_from_height == 0), 'source_dua_'+boc_source] = 'imputed from max_far'
df.loc[(df['source_dua_'+boc_source]=='missing') &
(df.max_dua_from_height == 0) &
(df.max_dua_from_far == 0), 'source_dua_'+boc_source] = 'imputed from max_far'
# if only one available use that
df.loc[(df['source_dua_'+boc_source]=="missing") &
df.max_dua_from_height.isnull() &
df.max_dua_from_far.notnull(), 'source_dua_'+boc_source] = 'imputed from max_far'
df.loc[(df['source_dua_'+boc_source]=='missing') &
df.max_dua_from_height.notnull() &
df.max_dua_from_far.isnull(), 'source_dua_'+boc_source] = 'imputed from max_height'
# imputation is decided -- set it
df.loc[ df['source_dua_'+boc_source]=='imputed from max_height (as min)', 'max_dua_'+boc_source] = \
df.loc[df['source_dua_'+boc_source]=='imputed from max_height (as min)', 'max_dua_from_height']
df.loc[ df['source_dua_'+boc_source]=='imputed from max_height', 'max_dua_'+boc_source] = \
df.loc[df['source_dua_'+boc_source]=='imputed from max_height', 'max_dua_from_height']
df.loc[ df['source_dua_'+boc_source]=='imputed from max_far (as min)', 'max_dua_'+boc_source] = \
df.loc[df['source_dua_'+boc_source]=='imputed from max_far (as min)', 'max_dua_from_far']
df.loc[ df['source_dua_'+boc_source]=='imputed from max_far', 'max_dua_'+boc_source] = \
df.loc[df['source_dua_'+boc_source]=='imputed from max_far', 'max_dua_from_far']
logger.info("After imputation: \n{}".format(df['source_dua_'+boc_source].value_counts()))
return df[['PARCEL_ID','max_dua_'+boc_source,'source_dua_'+boc_source]]
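
# Worked example of the conversions used above (illustrative numbers only; the
# real constants live in dev_capacity_calculation_module and may differ):
#   assume max_far = 2.0, SQUARE_FEET_PER_ACRE = 43560, SQUARE_FEET_PER_DU = 1000
#   max_dua_from_far    = 2.0 * 43560 / 1000   = 87.1 units/acre
#   assume max_height = 40 ft, FEET_PER_STORY = 12, PARCEL_USE_EFFICIENCY = 0.8
#   max_far_from_height = 40 / 12 * 0.8        ~= 2.67
#   max_dua_from_height = 2.67 * 43560 / 1000  ~= 116.2 units/acre
# Both candidates are positive and max_dua_from_height > max_dua_from_far, so the
# smaller value (87.1) is used and source_dua becomes 'imputed from max_far (as min)'.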
def impute_max_far(df_original,boc_source):
"""
Impute max_far from max_height
Returns dataframe with PARCEL_ID, max_far, source_far_[boc_source]
source_far is one of: [boc_source]: if it's already set so no imputation is necessary
imputed from max_height
missing: if it can't be imputed because max_far and max_height are missing too
Note: For parcels that are nodev or where nonresidential development isn't allowed, max_far isn't important
"""
# don't modify passed df
df = df_original.copy()
logger.info("impute_max_far_{}: Before imputation, number of parcels with missing max_far_{}: {:,}".format(
boc_source, boc_source, sum(df['max_far_'+boc_source].isnull())))
# we can only fill in missing if max_height is not null
df['max_far_from_height'] = \
df['max_height_' +boc_source] / dev_capacity_calculation_module.FEET_PER_STORY * dev_capacity_calculation_module.PARCEL_USE_EFFICIENCY
# default to missing
df['source_far_'+boc_source] = 'missing'
# this is set already -- nothing to do
df.loc[(df['max_far_'+boc_source].notnull()) &
(df['max_far_'+boc_source] > 0), 'source_far_'+boc_source] = boc_source
# decide on imputation source
# for missing values, fill from max_height
df.loc[(df['source_far_'+boc_source]=='missing') & df.max_far_from_height.notnull(),
'source_far_'+boc_source] = 'imputed from max_height'
# imputation is decided -- set it
df.loc[ df['source_far_'+boc_source]=='imputed from max_height', 'max_far_'+boc_source] = \
df.loc[df['source_far_'+boc_source]=='imputed from max_height', 'max_far_from_height']
logger.info("After imputation: \n{}".format(df['source_far_'+boc_source].value_counts()))
return df[['PARCEL_ID','max_far_'+boc_source,'source_far_'+boc_source]]
def impute_basis_devtypes_from_pba40(df):
"""
Where basis allowed development type is missing, impute value from pba40.
Note this in source_[btype]_basis, which will be set to one of
['basis', 'missing', 'imputed from pba40']
Returns df with [btype]_basis and source_[btype]_basis columns updated
"""
logger.info("impute_basis_devtypes_from_pba40():")
for btype in dev_capacity_calculation_module.ALLOWED_BUILDING_TYPE_CODES:
df['source_'+btype+'_basis'] = 'basis' # default
df.loc[ df[btype+'_basis'].isnull(), 'source_'+btype+'_basis'] = 'missing' # or missing if null
logger.info("Before imputation of {}_basis:\n{}".format(btype, df['source_'+btype+'_basis'].value_counts()))
# if basis value is missing
# and the pba40 value is present
# => impute
impute_idx = ((df[btype+'_basis'].isnull()) & \
(df[btype+'_pba40'].notnull()))
# impute and note source
df.loc[impute_idx, btype+'_basis' ] = df.loc[impute_idx, btype + '_pba40']
df.loc[impute_idx, 'source_'+btype+'_basis' ] = 'imputed from pba40'
logger.info("After imputation of {}_basis:\n{}".format(btype, df['source_'+btype+'_basis'].value_counts()))
logger.info("")
return df
def impute_basis_max_height_from_pba40(df):
"""
Where max_height_basis is missing, impute value from pba40.
Note this in source_height_basis, which will be set to one of
['basis', 'missing', 'imputed from pba40']
Returns df with max_height_basis and source_height_basis columns updated
"""
logger.info("impute_basis_max_height_from_pba40():")
df['source_height_basis'] = 'basis' # default
df.loc[ df['max_height_basis'].isnull(), 'source_height_basis'] = 'missing' # or missing if null
logger.info("Before imputation:\n{}".format(df['source_height_basis'].value_counts()))
# if basis value is missing
# and the pba40 value is present
# => impute
impute_idx = ((df['max_height_basis'].isnull()) & \
(df['max_height_pba40'].notnull()))
# impute and note source
df.loc[impute_idx, 'max_height_basis' ] = df.loc[impute_idx, 'max_height_pba40']
df.loc[impute_idx, 'source_height_basis' ] = 'imputed from pba40'
logger.info("After imputation:\n{}".format(df['source_height_basis'].value_counts()))
return df
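
# A sketch of how the helpers above might be chained on a combined parcel table
# (illustrative only; the actual pipeline may differ):
#
#   plu_boc = impute_basis_devtypes_from_pba40(plu_boc)
#   plu_boc = impute_basis_max_height_from_pba40(plu_boc)
#   for source in ['pba40', 'basis']:
#       dua = impute_max_dua(plu_boc, source)
#       far = impute_max_far(plu_boc, source)
#       plu_boc = plu_boc.drop(columns=['max_dua_' + source, 'max_far_' + source])
#       plu_boc = plu_boc.merge(dua, on='PARCEL_ID', how='left')
#       plu_boc = plu_boc.merge(far, on='PARCEL_ID', how='left')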
if __name__ == '__main__':
# create logger
logger = logging.getLogger(__name__)
logger.setLevel('DEBUG')
# console handler
ch = logging.StreamHandler()
ch.setLevel('INFO')
ch.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p'))
logger.addHandler(ch)
# file handler
fh = logging.FileHandler(LOG_FILE, mode='w')
fh.setLevel('DEBUG')
fh.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p'))
logger.addHandler(fh)
logger.info("BOX_DIR = {}".format(BOX_DIR))
logger.info("M_URBANSIM_DIR = {}".format(M_URBANSIM_DIR))
logger.info("M_SMELT_DIR = {}".format(M_SMELT_DIR))
logger.info("DATA_OUTPUT_DIR = {}".format(DATA_OUTPUT_DIR))
## Basemap parcels
basemap_p10_file = os.path.join(M_SMELT_DIR, 'p10.csv')
basemap_p10 = pd.read_csv(basemap_p10_file,
usecols =['PARCEL_ID','geom_id_s','ACRES','LAND_VALUE'],
dtype ={'PARCEL_ID':np.float64, 'geom_id_s':str, 'ACRES':np.float64, 'LAND_VALUE':np.float64})
# convert PARCEL_ID to integer:
basemap_p10['PARCEL_ID'] = basemap_p10['PARCEL_ID'].apply(lambda x: int(round(x)))
logger.info("Read {:,} rows from {}".format(len(basemap_p10), basemap_p10_file))
logger.info("\n{}".format(basemap_p10.head()))
logger.info('Number of unique PARCEL_ID: {}'.format(len(basemap_p10.PARCEL_ID.unique())))
## p10 parcel to pba40 zoning code mapping
pba40_pz_file = os.path.join(PBA40_ZONING_BOX_DIR, '2015_12_21_zoning_parcels.csv')
pba40_pz = pd.read_csv(
pba40_pz_file,
usecols = ['geom_id','zoning_id','nodev'],
dtype = {'geom_id':str, 'zoning_id':np.float64, 'nodev':np.int64})
logger.info("Read {:,} rows from {}".format(len(pba40_pz), pba40_pz_file))
logger.info("\n{}".format(pba40_pz.head()))
## add zoning_id, nodev_pba40 columns to p10
p10_pba40_pz = | pd.merge(left=basemap_p10, right=pba40_pz, left_on='geom_id_s', right_on = 'geom_id', how='left') | pandas.merge |
#
# Clean up raw run data for "render" files
#
import pandas as pd
import numpy as np
import sys
import os
runNum = '64'
#runNum = '80'
#runNum = '96'
#runNum = '112'
#runNum = '128'
#runNum = '256'
#runNum = '512'
#runNum = '768'
#runNum = '1024'
print( os.getcwd() )
dataFolder = os.getcwd() + '/DataWrangling/20181108Terascope_01/'
inFile =dataFolder+'stats-Render-STOP-'+runNum+'.csv'
outFile = dataFolder+'stats-Render-STOP-shaped-'+runNum+'.csv'
data = pd.read_csv(inFile)
#print ( sys.getsizeof( data ) )
data_out = data.rename( {'timestamp-epoch':'stopTime', 'timestamp':'date'}, axis='columns' )
#Add day so that PBI has the column
data_out['day'] = '08/11/2018'
print( inFile )
print( "Num columns raw: ", data_out.shape)
data_out = data_out.drop_duplicates('taskId')
print( "Num columns taskId filtered: ", data_out.shape)
#
# Ensure that the task indices, when added, are in time order.
#
data_out = data_out.sort_values(['stopTime'])
#
# Create task indices and layout X,Y coordinates.
#
data_out.insert(0, 'taskIx', range(0, 0 + data_out.shape[0]) )
a = divmod(data_out['taskIx'],257)
data_out.insert(1, 'taskX', a[0] )
data_out.insert(2, 'taskY', a[1] )
#
# Create host indices and layout X,Y coordinates
#
# Find unique host names
df = pd.DataFrame()
df.insert(0, 'host', data_out['host'] )
df = df.drop_duplicates('host')
#Create host indices
df.insert(0, 'hostIx', range(0, 0 + df.shape[0]) )
a = divmod(df['hostIx'],32)
df.insert(1, 'hostX', a[0] )
df.insert(2, 'hostY', a[1] )
#Merge into original data
data_merged = pd.merge(data_out, df, on='host')
#Set start time from 0
minStop = data_merged['stopTime'].min()
data_merged['stopTime'] = data_merged['stopTime'] - minStop
#
# Label ventiles of stopTime - cut into equal time steps
#
data_merged['stopVentile'] = pd.cut(data_merged['stopTime'], 20, labels=False)
data_merged['durationHalves'] = | pd.cut(data_merged['duration'], 2, labels=False) | pandas.cut |
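#
# For reference, pd.cut(..., labels=False) returns the integer bin index for each
# row instead of an Interval label. Illustrative example (made-up values, not
# from this run):
#   pd.cut(pd.Series([0, 5, 10, 95, 100]), 20, labels=False)
#   # -> bin indices [0, 0, 1, 18, 19]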
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright 2014-2019 OpenEEmeter contributors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
import pytest
import pandas as pd
import numpy as np
from eemeter import ModelMetrics
from eemeter.metrics import (
_compute_r_squared,
_compute_r_squared_adj,
_compute_rmse,
_compute_rmse_adj,
_compute_cvrmse,
_compute_cvrmse_adj,
_compute_mape,
_compute_nmae,
_compute_nmbe,
_compute_autocorr_resid,
_json_safe_float,
)
from eemeter.caltrack.usage_per_day import fit_caltrack_usage_per_day_model
@pytest.fixture
def sample_data():
# Could have included DatetimeIndex, but made it more general
series_one = pd.Series([1, 3, 4, 1, 6], name="NameOne")
series_two = pd.Series([2, 3, 3, 2, 4], name="NameTwo")
return series_one, series_two
def test_sample_model_metrics(model_metrics):
assert model_metrics.observed_length == 5
assert model_metrics.predicted_length == 5
assert model_metrics.merged_length == 5
assert model_metrics.observed_mean == 3.0
assert model_metrics.predicted_mean == 2.8
assert round(model_metrics.observed_skew, 3) == 0.524
assert round(model_metrics.predicted_skew, 3) == 0.512
assert round(model_metrics.observed_kurtosis, 3) == -0.963
assert round(model_metrics.predicted_kurtosis, 3) == -0.612
assert round(model_metrics.observed_cvstd, 3) == 0.707
assert round(model_metrics.predicted_cvstd, 3) == 0.299
assert round(model_metrics.r_squared, 3) == 0.972
assert round(model_metrics.r_squared_adj, 3) == 0.944
assert round(model_metrics.cvrmse, 3) == 0.394
assert round(model_metrics.cvrmse_adj, 3) == 0.509
assert round(model_metrics.mape, 3) == 0.517
assert round(model_metrics.mape_no_zeros, 3) == 0.517
assert model_metrics.num_meter_zeros == 0
assert round(model_metrics.nmae, 3) == 0.333
assert round(model_metrics.nmbe, 3) == -0.067
assert round(model_metrics.autocorr_resid, 3) == -0.674
assert round(model_metrics.n_prime, 3) == 25.694
assert round(model_metrics.single_tailed_confidence_level, 3) == 0.95
assert round(model_metrics.degrees_of_freedom, 3) == 24
assert round(model_metrics.t_stat, 3) == 1.711
assert round(model_metrics.cvrmse_auto_corr_correction, 3) == 0.356
assert round(model_metrics.approx_factor_auto_corr_correction, 3) == 1.038
def test_ModelMetrics(sample_data):
series_one, series_two = sample_data
model_metrics = ModelMetrics(series_one, series_two, num_parameters=2)
test_sample_model_metrics(model_metrics)
assert repr(model_metrics) is not None
assert json.dumps(model_metrics.json()) is not None
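
# Hand check of two of the asserted values above (a sketch of the arithmetic
# consistent with the fixture data; eemeter's internals may differ in detail):
#   observed  = [1, 3, 4, 1, 6], predicted = [2, 3, 3, 2, 4]
#   residuals = observed - predicted = [-1, 0, 1, -1, 2], SSE = 7
#   rmse      = sqrt(7 / 5)       ~= 1.183, cvrmse     = 1.183 / mean(observed)=3 ~= 0.394
#   rmse_adj  = sqrt(7 / (5 - 2)) ~= 1.528, cvrmse_adj = 1.528 / 3               ~= 0.509
#   (the 5 - 2 reflects num_parameters=2 passed to ModelMetrics)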
@pytest.fixture
def sample_data_zeros():
series_one = | pd.Series([1, 0, 0, 1, 6]) | pandas.Series |
"""
A warehouse for constant values required to initilize the PUDL Database.
This constants module stores and organizes a bunch of constant values which are
used throughout PUDL to populate static lists within the data packages or for
data cleaning purposes.
"""
import pandas as pd
import sqlalchemy as sa
######################################################################
# Constants used within the init.py module.
######################################################################
prime_movers = [
'steam_turbine',
'gas_turbine',
'hydro',
'internal_combustion',
'solar_pv',
'wind_turbine'
]
"""list: A list of the types of prime movers"""
rto_iso = {
'CAISO': 'California ISO',
'ERCOT': 'Electric Reliability Council of Texas',
'MISO': 'Midcontinent ISO',
'ISO-NE': 'ISO New England',
'NYISO': 'New York ISO',
'PJM': 'PJM Interconnection',
'SPP': 'Southwest Power Pool'
}
"""dict: A dictionary containing ISO/RTO abbreviations (keys) and names (values)
"""
us_states = {
'AK': 'Alaska',
'AL': 'Alabama',
'AR': 'Arkansas',
'AS': 'American Samoa',
'AZ': 'Arizona',
'CA': 'California',
'CO': 'Colorado',
'CT': 'Connecticut',
'DC': 'District of Columbia',
'DE': 'Delaware',
'FL': 'Florida',
'GA': 'Georgia',
'GU': 'Guam',
'HI': 'Hawaii',
'IA': 'Iowa',
'ID': 'Idaho',
'IL': 'Illinois',
'IN': 'Indiana',
'KS': 'Kansas',
'KY': 'Kentucky',
'LA': 'Louisiana',
'MA': 'Massachusetts',
'MD': 'Maryland',
'ME': 'Maine',
'MI': 'Michigan',
'MN': 'Minnesota',
'MO': 'Missouri',
'MP': 'Northern Mariana Islands',
'MS': 'Mississippi',
'MT': 'Montana',
'NA': 'National',
'NC': 'North Carolina',
'ND': 'North Dakota',
'NE': 'Nebraska',
'NH': 'New Hampshire',
'NJ': 'New Jersey',
'NM': 'New Mexico',
'NV': 'Nevada',
'NY': 'New York',
'OH': 'Ohio',
'OK': 'Oklahoma',
'OR': 'Oregon',
'PA': 'Pennsylvania',
'PR': 'Puerto Rico',
'RI': 'Rhode Island',
'SC': 'South Carolina',
'SD': 'South Dakota',
'TN': 'Tennessee',
'TX': 'Texas',
'UT': 'Utah',
'VA': 'Virginia',
'VI': 'Virgin Islands',
'VT': 'Vermont',
'WA': 'Washington',
'WI': 'Wisconsin',
'WV': 'West Virginia',
'WY': 'Wyoming'
}
"""dict: A dictionary containing US state abbreviations (keys) and names
(values)
"""
canada_prov_terr = {
'AB': 'Alberta',
'BC': 'British Columbia',
'CN': 'Canada',
'MB': 'Manitoba',
'NB': 'New Brunswick',
'NS': 'Nova Scotia',
'NL': 'Newfoundland and Labrador',
'NT': 'Northwest Territories',
'NU': 'Nunavut',
'ON': 'Ontario',
'PE': 'Prince Edward Island',
'QC': 'Quebec',
'SK': 'Saskatchewan',
'YT': 'Yukon Territory',
}
"""dict: A dictionary containing Canadian provinces' and territories'
abbreviations (keys) and names (values)
"""
cems_states = {k: v for k, v in us_states.items() if v not in
{'Alaska',
'American Samoa',
'Guam',
'Hawaii',
'Northern Mariana Islands',
'National',
'Puerto Rico',
'Virgin Islands'}
}
"""dict: A dictionary containing US state abbreviations (keys) and names
(values) that are present in the CEMS dataset
"""
# This is imperfect for states that have split timezones. See:
# https://en.wikipedia.org/wiki/List_of_time_offsets_by_U.S._state_and_territory
# For states that are split, I went with where there seem to be more people
# List of timezones in pytz.common_timezones
# Canada: https://en.wikipedia.org/wiki/Time_in_Canada#IANA_time_zone_database
state_tz_approx = {
"AK": "US/Alaska", # Alaska; Not in CEMS
"AL": "US/Central", # Alabama
"AR": "US/Central", # Arkansas
"AS": "Pacific/Pago_Pago", # American Samoa; Not in CEMS
"AZ": "US/Arizona", # Arizona
"CA": "US/Pacific", # California
"CO": "US/Mountain", # Colorado
"CT": "US/Eastern", # Connecticut
"DC": "US/Eastern", # District of Columbia
"DE": "US/Eastern", # Delaware
"FL": "US/Eastern", # Florida (split state)
"GA": "US/Eastern", # Georgia
"GU": "Pacific/Guam", # Guam; Not in CEMS
"HI": "US/Hawaii", # Hawaii; Not in CEMS
"IA": "US/Central", # Iowa
"ID": "US/Mountain", # Idaho (split state)
"IL": "US/Central", # Illinois
"IN": "US/Eastern", # Indiana (split state)
"KS": "US/Central", # Kansas (split state)
"KY": "US/Eastern", # Kentucky (split state)
"LA": "US/Central", # Louisiana
"MA": "US/Eastern", # Massachusetts
"MD": "US/Eastern", # Maryland
"ME": "US/Eastern", # Maine
"MI": "America/Detroit", # Michigan (split state)
"MN": "US/Central", # Minnesota
"MO": "US/Central", # Missouri
"MP": "Pacific/Saipan", # Northern Mariana Islands; Not in CEMS
"MS": "US/Central", # Mississippi
"MT": "US/Mountain", # Montana
"NC": "US/Eastern", # North Carolina
"ND": "US/Central", # North Dakota (split state)
"NE": "US/Central", # Nebraska (split state)
"NH": "US/Eastern", # New Hampshire
"NJ": "US/Eastern", # New Jersey
"NM": "US/Mountain", # New Mexico
"NV": "US/Pacific", # Nevada
"NY": "US/Eastern", # New York
"OH": "US/Eastern", # Ohio
"OK": "US/Central", # Oklahoma
"OR": "US/Pacific", # Oregon (split state)
"PA": "US/Eastern", # Pennsylvania
"PR": "America/Puerto_Rico", # Puerto Rico; Not in CEMS
"RI": "US/Eastern", # Rhode Island
"SC": "US/Eastern", # South Carolina
"SD": "US/Central", # South Dakota (split state)
"TN": "US/Central", # Tennessee
"TX": "US/Central", # Texas
"UT": "US/Mountain", # Utah
"VA": "US/Eastern", # Virginia
"VI": "America/Puerto_Rico", # Virgin Islands; Not in CEMS
"VT": "US/Eastern", # Vermont
"WA": "US/Pacific", # Washington
"WI": "US/Central", # Wisconsin
"WV": "US/Eastern", # West Virginia
"WY": "US/Mountain", # Wyoming
# Canada (none of these are in CEMS)
"AB": "America/Edmonton", # Alberta
"BC": "America/Vancouver", # British Columbia (split province)
"MB": "America/Winnipeg", # Manitoba
"NB": "America/Moncton", # New Brunswick
"NS": "America/Halifax", # Nova Scotia
"NL": "America/St_Johns", # Newfoundland and Labrador (split province)
"NT": "America/Yellowknife", # Northwest Territories (split province)
"NU": "America/Iqaluit", # Nunavut (split province)
"ON": "America/Toronto", # Ontario (split province)
"PE": "America/Halifax", # Prince Edwards Island
"QC": "America/Montreal", # Quebec (split province)
"SK": "America/Regina", # Saskatchewan (split province)
"YT": "America/Whitehorse", # Yukon Territory
}
"""dict: A dictionary containing US and Canadian state/territory abbreviations
(keys) and timezones (values)
"""
ferc1_power_purchase_type = {
'RQ': 'requirement',
'LF': 'long_firm',
'IF': 'intermediate_firm',
'SF': 'short_firm',
'LU': 'long_unit',
'IU': 'intermediate_unit',
'EX': 'electricity_exchange',
'OS': 'other_service',
'AD': 'adjustment'
}
"""dict: A dictionary of abbreviations (keys) and types (values) for power
purchase agreements from FERC Form 1.
"""
# Dictionary mapping DBF files (w/o .DBF file extension) to DB table names
ferc1_dbf2tbl = {
'F1_1': 'f1_respondent_id',
'F1_2': 'f1_acb_epda',
'F1_3': 'f1_accumdepr_prvsn',
'F1_4': 'f1_accumdfrrdtaxcr',
'F1_5': 'f1_adit_190_detail',
'F1_6': 'f1_adit_190_notes',
'F1_7': 'f1_adit_amrt_prop',
'F1_8': 'f1_adit_other',
'F1_9': 'f1_adit_other_prop',
'F1_10': 'f1_allowances',
'F1_11': 'f1_bal_sheet_cr',
'F1_12': 'f1_capital_stock',
'F1_13': 'f1_cash_flow',
'F1_14': 'f1_cmmn_utlty_p_e',
'F1_15': 'f1_comp_balance_db',
'F1_16': 'f1_construction',
'F1_17': 'f1_control_respdnt',
'F1_18': 'f1_co_directors',
'F1_19': 'f1_cptl_stk_expns',
'F1_20': 'f1_csscslc_pcsircs',
'F1_21': 'f1_dacs_epda',
'F1_22': 'f1_dscnt_cptl_stk',
'F1_23': 'f1_edcfu_epda',
'F1_24': 'f1_elctrc_erg_acct',
'F1_25': 'f1_elctrc_oper_rev',
'F1_26': 'f1_elc_oper_rev_nb',
'F1_27': 'f1_elc_op_mnt_expn',
'F1_28': 'f1_electric',
'F1_29': 'f1_envrnmntl_expns',
'F1_30': 'f1_envrnmntl_fclty',
'F1_31': 'f1_fuel',
'F1_32': 'f1_general_info',
'F1_33': 'f1_gnrt_plant',
'F1_34': 'f1_important_chg',
'F1_35': 'f1_incm_stmnt_2',
'F1_36': 'f1_income_stmnt',
'F1_37': 'f1_miscgen_expnelc',
'F1_38': 'f1_misc_dfrrd_dr',
'F1_39': 'f1_mthly_peak_otpt',
'F1_40': 'f1_mtrl_spply',
'F1_41': 'f1_nbr_elc_deptemp',
'F1_42': 'f1_nonutility_prop',
'F1_43': 'f1_note_fin_stmnt', # 37% of DB
'F1_44': 'f1_nuclear_fuel',
'F1_45': 'f1_officers_co',
'F1_46': 'f1_othr_dfrrd_cr',
'F1_47': 'f1_othr_pd_in_cptl',
'F1_48': 'f1_othr_reg_assets',
'F1_49': 'f1_othr_reg_liab',
'F1_50': 'f1_overhead',
'F1_51': 'f1_pccidica',
'F1_52': 'f1_plant_in_srvce',
'F1_53': 'f1_pumped_storage',
'F1_54': 'f1_purchased_pwr',
'F1_55': 'f1_reconrpt_netinc',
'F1_56': 'f1_reg_comm_expn',
'F1_57': 'f1_respdnt_control',
'F1_58': 'f1_retained_erng',
'F1_59': 'f1_r_d_demo_actvty',
'F1_60': 'f1_sales_by_sched',
'F1_61': 'f1_sale_for_resale',
'F1_62': 'f1_sbsdry_totals',
'F1_63': 'f1_schedules_list',
'F1_64': 'f1_security_holder',
'F1_65': 'f1_slry_wg_dstrbtn',
'F1_66': 'f1_substations',
'F1_67': 'f1_taxacc_ppchrgyr',
'F1_68': 'f1_unrcvrd_cost',
'F1_69': 'f1_utltyplnt_smmry',
'F1_70': 'f1_work',
'F1_71': 'f1_xmssn_adds',
'F1_72': 'f1_xmssn_elc_bothr',
'F1_73': 'f1_xmssn_elc_fothr',
'F1_74': 'f1_xmssn_line',
'F1_75': 'f1_xtraordnry_loss',
'F1_76': 'f1_codes_val',
'F1_77': 'f1_sched_lit_tbl',
'F1_78': 'f1_audit_log',
'F1_79': 'f1_col_lit_tbl',
'F1_80': 'f1_load_file_names',
'F1_81': 'f1_privilege',
'F1_82': 'f1_sys_error_log',
'F1_83': 'f1_unique_num_val',
'F1_84': 'f1_row_lit_tbl',
'F1_85': 'f1_footnote_data',
'F1_86': 'f1_hydro',
'F1_87': 'f1_footnote_tbl', # 52% of DB
'F1_88': 'f1_ident_attsttn',
'F1_89': 'f1_steam',
'F1_90': 'f1_leased',
'F1_91': 'f1_sbsdry_detail',
'F1_92': 'f1_plant',
'F1_93': 'f1_long_term_debt',
'F1_106_2009': 'f1_106_2009',
'F1_106A_2009': 'f1_106a_2009',
'F1_106B_2009': 'f1_106b_2009',
'F1_208_ELC_DEP': 'f1_208_elc_dep',
'F1_231_TRN_STDYCST': 'f1_231_trn_stdycst',
'F1_324_ELC_EXPNS': 'f1_324_elc_expns',
'F1_325_ELC_CUST': 'f1_325_elc_cust',
'F1_331_TRANSISO': 'f1_331_transiso',
'F1_338_DEP_DEPL': 'f1_338_dep_depl',
'F1_397_ISORTO_STL': 'f1_397_isorto_stl',
'F1_398_ANCL_PS': 'f1_398_ancl_ps',
'F1_399_MTH_PEAK': 'f1_399_mth_peak',
'F1_400_SYS_PEAK': 'f1_400_sys_peak',
'F1_400A_ISO_PEAK': 'f1_400a_iso_peak',
'F1_429_TRANS_AFF': 'f1_429_trans_aff',
'F1_ALLOWANCES_NOX': 'f1_allowances_nox',
'F1_CMPINC_HEDGE_A': 'f1_cmpinc_hedge_a',
'F1_CMPINC_HEDGE': 'f1_cmpinc_hedge',
'F1_EMAIL': 'f1_email',
'F1_RG_TRN_SRV_REV': 'f1_rg_trn_srv_rev',
'F1_S0_CHECKS': 'f1_s0_checks',
'F1_S0_FILING_LOG': 'f1_s0_filing_log',
'F1_SECURITY': 'f1_security'
# 'F1_PINS': 'f1_pins', # private data, not publicized.
# 'F1_FREEZE': 'f1_freeze', # private data, not publicized
}
"""dict: A dictionary mapping FERC Form 1 DBF files(w / o .DBF file extension)
(keys) to database table names (values).
"""
ferc1_huge_tables = {
'f1_footnote_tbl',
'f1_footnote_data',
'f1_note_fin_stmnt',
}
"""set: A set containing large FERC Form 1 tables.
"""
# Invert the map above so we can go either way as needed
ferc1_tbl2dbf = {v: k for k, v in ferc1_dbf2tbl.items()}
"""dict: A dictionary mapping database table names (keys) to FERC Form 1 DBF
files (w/o .DBF file extension) (values).
"""
# This dictionary maps the strings which are used to denote field types in the
# DBF objects to the corresponding generic SQLAlchemy Column types:
# These definitions come from a combination of the dbfread example program
# dbf2sqlite and this DBF file format documentation page:
# http://www.dbase.com/KnowledgeBase/int/db7_file_fmt.htm
# Un-mapped types left as 'XXX' which should obviously make an error...
dbf_typemap = {
'C': sa.String,
'D': sa.Date,
'F': sa.Float,
'I': sa.Integer,
'L': sa.Boolean,
'M': sa.Text, # 10 digit .DBT block number, stored as a string...
'N': sa.Float,
'T': sa.DateTime,
'0': sa.Integer, # based on dbf2sqlite mapping
'B': 'XXX', # .DBT block number, binary string
'@': 'XXX', # Timestamp... Date = Julian Day, Time is in milliseconds?
'+': 'XXX', # Autoincrement (e.g. for IDs)
'O': 'XXX', # Double, 8 bytes
'G': 'XXX', # OLE 10 digit/byte number of a .DBT block, stored as string
}
"""dict: A dictionary mapping field types in the DBF objects (keys) to the
corresponding generic SQLAlchemy Column types.
"""
# This is the set of tables which have been successfully integrated into PUDL:
ferc1_pudl_tables = (
'fuel_ferc1', # Plant-level data, linked to plants_steam_ferc1
'plants_steam_ferc1', # Plant-level data
'plants_small_ferc1', # Plant-level data
'plants_hydro_ferc1', # Plant-level data
'plants_pumped_storage_ferc1', # Plant-level data
'purchased_power_ferc1', # Inter-utility electricity transactions
'plant_in_service_ferc1', # Row-mapped plant accounting data.
# 'accumulated_depreciation_ferc1' # Requires row-mapping to be useful.
)
"""tuple: A tuple containing the FERC Form 1 tables that can be successfully
integrated into PUDL.
"""
table_map_ferc1_pudl = {
'fuel_ferc1': 'f1_fuel',
'plants_steam_ferc1': 'f1_steam',
'plants_small_ferc1': 'f1_gnrt_plant',
'plants_hydro_ferc1': 'f1_hydro',
'plants_pumped_storage_ferc1': 'f1_pumped_storage',
'plant_in_service_ferc1': 'f1_plant_in_srvce',
'purchased_power_ferc1': 'f1_purchased_pwr',
# 'accumulated_depreciation_ferc1': 'f1_accumdepr_prvsn'
}
"""dict: A dictionary mapping PUDL table names (keys) to the corresponding FERC
Form 1 DBF table names.
"""
# This is the list of EIA923 tables that can be successfully pulled into PUDL
eia923_pudl_tables = ('generation_fuel_eia923',
'boiler_fuel_eia923',
'generation_eia923',
'coalmine_eia923',
'fuel_receipts_costs_eia923')
"""tuple: A tuple containing the EIA923 tables that can be successfully
integrated into PUDL.
"""
epaipm_pudl_tables = (
'transmission_single_epaipm',
'transmission_joint_epaipm',
'load_curves_epaipm',
'plant_region_map_epaipm',
)
"""tuple: A tuple containing the EPA IPM tables that can be successfully
integrated into PUDL.
"""
# List of entity tables
entity_tables = ['utilities_entity_eia',
'plants_entity_eia',
'generators_entity_eia',
'boilers_entity_eia',
'regions_entity_epaipm', ]
"""list: A list of PUDL entity tables.
"""
xlsx_maps_pkg = 'pudl.package_data.meta.xlsx_maps'
"""string: The location of the xlsx maps within the PUDL package data."""
##############################################################################
# EIA 923 Spreadsheet Metadata
##############################################################################
##############################################################################
# EIA 860 Spreadsheet Metadata
##############################################################################
# This is the list of EIA860 tables that can be successfully pulled into PUDL
eia860_pudl_tables = (
'boiler_generator_assn_eia860',
'utilities_eia860',
'plants_eia860',
'generators_eia860',
'ownership_eia860'
)
"""tuple: A tuple enumerating EIA 860 tables for which PUDL's ETL works."""
# The set of FERC Form 1 tables that have the same composite primary keys: [
# respondent_id, report_year, report_prd, row_number, spplmnt_num ].
# TODO: THIS ONLY PERTAINS TO 2015 AND MAY NEED TO BE ADJUSTED BY YEAR...
ferc1_data_tables = (
'f1_acb_epda', 'f1_accumdepr_prvsn', 'f1_accumdfrrdtaxcr',
'f1_adit_190_detail', 'f1_adit_190_notes', 'f1_adit_amrt_prop',
'f1_adit_other', 'f1_adit_other_prop', 'f1_allowances', 'f1_bal_sheet_cr',
'f1_capital_stock', 'f1_cash_flow', 'f1_cmmn_utlty_p_e',
'f1_comp_balance_db', 'f1_construction', 'f1_control_respdnt',
'f1_co_directors', 'f1_cptl_stk_expns', 'f1_csscslc_pcsircs',
'f1_dacs_epda', 'f1_dscnt_cptl_stk', 'f1_edcfu_epda', 'f1_elctrc_erg_acct',
'f1_elctrc_oper_rev', 'f1_elc_oper_rev_nb', 'f1_elc_op_mnt_expn',
'f1_electric', 'f1_envrnmntl_expns', 'f1_envrnmntl_fclty', 'f1_fuel',
'f1_general_info', 'f1_gnrt_plant', 'f1_important_chg', 'f1_incm_stmnt_2',
'f1_income_stmnt', 'f1_miscgen_expnelc', 'f1_misc_dfrrd_dr',
'f1_mthly_peak_otpt', 'f1_mtrl_spply', 'f1_nbr_elc_deptemp',
'f1_nonutility_prop', 'f1_note_fin_stmnt', 'f1_nuclear_fuel',
'f1_officers_co', 'f1_othr_dfrrd_cr', 'f1_othr_pd_in_cptl',
'f1_othr_reg_assets', 'f1_othr_reg_liab', 'f1_overhead', 'f1_pccidica',
'f1_plant_in_srvce', 'f1_pumped_storage', 'f1_purchased_pwr',
'f1_reconrpt_netinc', 'f1_reg_comm_expn', 'f1_respdnt_control',
'f1_retained_erng', 'f1_r_d_demo_actvty', 'f1_sales_by_sched',
'f1_sale_for_resale', 'f1_sbsdry_totals', 'f1_schedules_list',
'f1_security_holder', 'f1_slry_wg_dstrbtn', 'f1_substations',
'f1_taxacc_ppchrgyr', 'f1_unrcvrd_cost', 'f1_utltyplnt_smmry', 'f1_work',
'f1_xmssn_adds', 'f1_xmssn_elc_bothr', 'f1_xmssn_elc_fothr',
'f1_xmssn_line', 'f1_xtraordnry_loss',
'f1_hydro', 'f1_steam', 'f1_leased', 'f1_sbsdry_detail',
'f1_plant', 'f1_long_term_debt', 'f1_106_2009', 'f1_106a_2009',
'f1_106b_2009', 'f1_208_elc_dep', 'f1_231_trn_stdycst', 'f1_324_elc_expns',
'f1_325_elc_cust', 'f1_331_transiso', 'f1_338_dep_depl',
'f1_397_isorto_stl', 'f1_398_ancl_ps', 'f1_399_mth_peak',
'f1_400_sys_peak', 'f1_400a_iso_peak', 'f1_429_trans_aff',
'f1_allowances_nox', 'f1_cmpinc_hedge_a', 'f1_cmpinc_hedge',
'f1_rg_trn_srv_rev')
"""tuple: A tuple containing the FERC Form 1 tables that have the same composite
primary keys: [respondent_id, report_year, report_prd, row_number,
spplmnt_num].
"""
# Line numbers, and corresponding FERC account number
# from FERC Form 1 pages 204-207, Electric Plant in Service.
# Descriptions from: https://www.law.cornell.edu/cfr/text/18/part-101
ferc_electric_plant_accounts = pd.DataFrame.from_records([
# 1. Intangible Plant
(2, '301', 'Intangible: Organization'),
(3, '302', 'Intangible: Franchises and consents'),
(4, '303', 'Intangible: Miscellaneous intangible plant'),
(5, 'subtotal_intangible', 'Subtotal: Intangible Plant'),
# 2. Production Plant
# A. steam production
(8, '310', 'Steam production: Land and land rights'),
(9, '311', 'Steam production: Structures and improvements'),
(10, '312', 'Steam production: Boiler plant equipment'),
(11, '313', 'Steam production: Engines and engine-driven generators'),
(12, '314', 'Steam production: Turbogenerator units'),
(13, '315', 'Steam production: Accessory electric equipment'),
(14, '316', 'Steam production: Miscellaneous power plant equipment'),
(15, '317', 'Steam production: Asset retirement costs for steam production\
plant'),
(16, 'subtotal_steam_production', 'Subtotal: Steam Production Plant'),
# B. nuclear production
(18, '320', 'Nuclear production: Land and land rights (Major only)'),
(19, '321', 'Nuclear production: Structures and improvements (Major\
only)'),
(20, '322', 'Nuclear production: Reactor plant equipment (Major only)'),
(21, '323', 'Nuclear production: Turbogenerator units (Major only)'),
(22, '324', 'Nuclear production: Accessory electric equipment (Major\
only)'),
(23, '325', 'Nuclear production: Miscellaneous power plant equipment\
(Major only)'),
(24, '326', 'Nuclear production: Asset retirement costs for nuclear\
production plant (Major only)'),
(25, 'subtotal_nuclear_production', 'Subtotal: Nuclear Production Plant'),
# C. hydraulic production
(27, '330', 'Hydraulic production: Land and land rights'),
(28, '331', 'Hydraulic production: Structures and improvements'),
(29, '332', 'Hydraulic production: Reservoirs, dams, and waterways'),
(30, '333', 'Hydraulic production: Water wheels, turbines and generators'),
(31, '334', 'Hydraulic production: Accessory electric equipment'),
(32, '335', 'Hydraulic production: Miscellaneous power plant equipment'),
(33, '336', 'Hydraulic production: Roads, railroads and bridges'),
(34, '337', 'Hydraulic production: Asset retirement costs for hydraulic\
production plant'),
(35, 'subtotal_hydraulic_production', 'Subtotal: Hydraulic Production\
Plant'),
# D. other production
(37, '340', 'Other production: Land and land rights'),
(38, '341', 'Other production: Structures and improvements'),
(39, '342', 'Other production: Fuel holders, producers, and accessories'),
(40, '343', 'Other production: Prime movers'),
(41, '344', 'Other production: Generators'),
(42, '345', 'Other production: Accessory electric equipment'),
(43, '346', 'Other production: Miscellaneous power plant equipment'),
(44, '347', 'Other production: Asset retirement costs for other production\
plant'),
(None, '348', 'Other production: Energy Storage Equipment'),
(45, 'subtotal_other_production', 'Subtotal: Other Production Plant'),
(46, 'subtotal_production', 'Subtotal: Production Plant'),
# 3. Transmission Plant,
(48, '350', 'Transmission: Land and land rights'),
(None, '351', 'Transmission: Energy Storage Equipment'),
(49, '352', 'Transmission: Structures and improvements'),
(50, '353', 'Transmission: Station equipment'),
(51, '354', 'Transmission: Towers and fixtures'),
(52, '355', 'Transmission: Poles and fixtures'),
(53, '356', 'Transmission: Overhead conductors and devices'),
(54, '357', 'Transmission: Underground conduit'),
(55, '358', 'Transmission: Underground conductors and devices'),
(56, '359', 'Transmission: Roads and trails'),
(57, '359.1', 'Transmission: Asset retirement costs for transmission\
plant'),
(58, 'subtotal_transmission', 'Subtotal: Transmission Plant'),
# 4. Distribution Plant
(60, '360', 'Distribution: Land and land rights'),
(61, '361', 'Distribution: Structures and improvements'),
(62, '362', 'Distribution: Station equipment'),
(63, '363', 'Distribution: Storage battery equipment'),
(64, '364', 'Distribution: Poles, towers and fixtures'),
(65, '365', 'Distribution: Overhead conductors and devices'),
(66, '366', 'Distribution: Underground conduit'),
(67, '367', 'Distribution: Underground conductors and devices'),
(68, '368', 'Distribution: Line transformers'),
(69, '369', 'Distribution: Services'),
(70, '370', 'Distribution: Meters'),
(71, '371', 'Distribution: Installations on customers\' premises'),
(72, '372', 'Distribution: Leased property on customers\' premises'),
(73, '373', 'Distribution: Street lighting and signal systems'),
(74, '374', 'Distribution: Asset retirement costs for distribution plant'),
(75, 'subtotal_distribution', 'Subtotal: Distribution Plant'),
# 5. Regional Transmission and Market Operation Plant
(77, '380', 'Regional transmission: Land and land rights'),
(78, '381', 'Regional transmission: Structures and improvements'),
(79, '382', 'Regional transmission: Computer hardware'),
(80, '383', 'Regional transmission: Computer software'),
(81, '384', 'Regional transmission: Communication Equipment'),
(82, '385', 'Regional transmission: Miscellaneous Regional Transmission\
and Market Operation Plant'),
(83, '386', 'Regional transmission: Asset Retirement Costs for Regional\
Transmission and Market Operation\
Plant'),
(84, 'subtotal_regional_transmission', 'Subtotal: Transmission and Market\
Operation Plant'),
(None, '387', 'Regional transmission: [Reserved]'),
# 6. General Plant
(86, '389', 'General: Land and land rights'),
(87, '390', 'General: Structures and improvements'),
(88, '391', 'General: Office furniture and equipment'),
(89, '392', 'General: Transportation equipment'),
(90, '393', 'General: Stores equipment'),
(91, '394', 'General: Tools, shop and garage equipment'),
(92, '395', 'General: Laboratory equipment'),
(93, '396', 'General: Power operated equipment'),
(94, '397', 'General: Communication equipment'),
(95, '398', 'General: Miscellaneous equipment'),
(96, 'subtotal_general', 'Subtotal: General Plant'),
(97, '399', 'General: Other tangible property'),
(98, '399.1', 'General: Asset retirement costs for general plant'),
(99, 'total_general', 'TOTAL General Plant'),
(100, '101_and_106', 'Electric plant in service (Major only)'),
(101, '102_purchased', 'Electric plant purchased'),
(102, '102_sold', 'Electric plant sold'),
(103, '103', 'Experimental plant unclassified'),
(104, 'total_electric_plant', 'TOTAL Electric Plant in Service')],
columns=['row_number', 'ferc_account_id', 'ferc_account_description'])
"""list: A list of tuples containing row numbers, FERC account IDs, and FERC
account descriptions from FERC Form 1 pages 204 - 207, Electric Plant in
Service.
"""
# Line numbers, and corresponding FERC account number
# from FERC Form 1 page 219, ACCUMULATED PROVISION FOR DEPRECIATION
# OF ELECTRIC UTILITY PLANT (Account 108).
ferc_accumulated_depreciation = pd.DataFrame.from_records([
# Section A. Balances and Changes During Year
(1, 'balance_beginning_of_year', 'Balance Beginning of Year'),
(3, 'depreciation_expense', '(403) Depreciation Expense'),
(4, 'depreciation_expense_asset_retirement', \
'(403.1) Depreciation Expense for Asset Retirement Costs'),
(5, 'expense_electric_plant_leased_to_others', \
'(413) Exp. of Elec. Plt. Leas. to Others'),
(6, 'transportation_expenses_clearing',\
'Transportation Expenses-Clearing'),
(7, 'other_clearing_accounts', 'Other Clearing Accounts'),
(8, 'other_accounts_specified',\
'Other Accounts (Specify, details in footnote):'),
# blank: might also be other charges like line 17.
(9, 'other_charges', 'Other Charges:'),
(10, 'total_depreciation_provision_for_year',\
'TOTAL Deprec. Prov for Year (Enter Total of lines 3 thru 9)'),
(11, 'net_charges_for_plant_retired', 'Net Charges for Plant Retired:'),
(12, 'book_cost_of_plant_retired', 'Book Cost of Plant Retired'),
(13, 'cost_of_removal', 'Cost of Removal'),
(14, 'salvage_credit', 'Salvage (Credit)'),
(15, 'total_net_charges_for_plant_retired',\
'TOTAL Net Chrgs. for Plant Ret. (Enter Total of lines 12 thru 14)'),
(16, 'other_debit_or_credit_items',\
'Other Debit or Cr. Items (Describe, details in footnote):'),
# blank: can be "Other Charges", e.g. in 2012 for PSCo.
(17, 'other_charges_2', 'Other Charges 2'),
(18, 'book_cost_or_asset_retirement_costs_retired',\
'Book Cost or Asset Retirement Costs Retired'),
(19, 'balance_end_of_year', \
'Balance End of Year (Enter Totals of lines 1, 10, 15, 16, and 18)'),
# Section B. Balances at End of Year According to Functional Classification
(20, 'steam_production_end_of_year', 'Steam Production'),
(21, 'nuclear_production_end_of_year', 'Nuclear Production'),
(22, 'hydraulic_production_end_of_year',\
'Hydraulic Production-Conventional'),
(23, 'pumped_storage_end_of_year', 'Hydraulic Production-Pumped Storage'),
(24, 'other_production', 'Other Production'),
(25, 'transmission', 'Transmission'),
(26, 'distribution', 'Distribution'),
(27, 'regional_transmission_and_market_operation',
'Regional Transmission and Market Operation'),
(28, 'general', 'General'),
(29, 'total', 'TOTAL (Enter Total of lines 20 thru 28)')],
columns=['row_number', 'line_id', 'ferc_account_description'])
"""list: A list of tuples containing row numbers, FERC account IDs, and FERC
account descriptions from FERC Form 1 page 219, Accumulated Provision for
Depreciation of electric utility plant(Account 108).
"""
######################################################################
# Constants from EIA From 923 used within init.py module
######################################################################
# From Page 7 of EIA Form 923, Census Region a US state is located in
census_region = {
'NEW': 'New England',
'MAT': 'Middle Atlantic',
'SAT': 'South Atlantic',
'ESC': 'East South Central',
'WSC': 'West South Central',
'ENC': 'East North Central',
'WNC': 'West North Central',
'MTN': 'Mountain',
'PACC': 'Pacific Contiguous (OR, WA, CA)',
'PACN': 'Pacific Non-Contiguous (AK, HI)',
}
"""dict: A dictionary mapping Census Region abbreviations (keys) to Census
Region names (values).
"""
# From Page 7 of EIA Form923
# Static list of NERC (North American Electric Reliability Corporation)
# regions, used for where plant is located
nerc_region = {
'NPCC': 'Northeast Power Coordinating Council',
'ASCC': 'Alaska Systems Coordinating Council',
'HICC': 'Hawaiian Islands Coordinating Council',
'MRO': 'Midwest Reliability Organization',
'SERC': 'SERC Reliability Corporation',
'RFC': 'Reliability First Corporation',
'SPP': 'Southwest Power Pool',
'TRE': 'Texas Regional Entity',
'FRCC': 'Florida Reliability Coordinating Council',
'WECC': 'Western Electricity Coordinating Council'
}
"""dict: A dictionary mapping NERC Region abbreviations (keys) to NERC
Region names (values).
"""
# From Page 7 of EIA Form 923 EIA’s internal consolidated NAICS sectors.
# For internal purposes, EIA consolidates NAICS categories into seven groups.
sector_eia = {
# traditional regulated electric utilities
'1': 'Electric Utility',
# Independent power producers which are not cogenerators
'2': 'NAICS-22 Non-Cogen',
# Independent power producers which are cogenerators, but whose
# primary business purpose is the sale of electricity to the public
'3': 'NAICS-22 Cogen',
# Commercial non-cogeneration facilities that produce electric power,
# are connected to the grid, and can sell power to the public
'4': 'Commercial NAICS Non-Cogen',
# Commercial cogeneration facilities that produce electric power, are
# connected to the grid, and can sell power to the public
'5': 'Commercial NAICS Cogen',
# Industrial non-cogeneration facilities that produce electric power, are
# connected to the grid, and can sell power to the public
'6': 'Industrial NAICS Non-Cogen',
# Industrial cogeneration facilities that produce electric power, are
# connected to the grid, and can sell power to the public
'7': 'Industrial NAICS Cogen'
}
"""dict: A dictionary mapping EIA numeric codes (keys) to EIA’s internal
consolidated NAICS sectors (values).
"""
# EIA 923: EIA Type of prime mover:
prime_movers_eia923 = {
'BA': 'Energy Storage, Battery',
'BT': 'Turbines Used in a Binary Cycle. Including those used for geothermal applications',
'CA': 'Combined-Cycle -- Steam Part',
'CC': 'Combined-Cycle, Total Unit',
'CE': 'Energy Storage, Compressed Air',
'CP': 'Energy Storage, Concentrated Solar Power',
'CS': 'Combined-Cycle Single-Shaft Combustion Turbine and Steam Turbine share of single',
'CT': 'Combined-Cycle Combustion Turbine Part',
'ES': 'Energy Storage, Other (Specify on Schedule 9, Comments)',
'FC': 'Fuel Cell',
'FW': 'Energy Storage, Flywheel',
'GT': 'Combustion (Gas) Turbine. Including Jet Engine design',
'HA': 'Hydrokinetic, Axial Flow Turbine',
'HB': 'Hydrokinetic, Wave Buoy',
'HK': 'Hydrokinetic, Other',
'HY': 'Hydraulic Turbine. Including turbines associated with delivery of water by pipeline.',
'IC': 'Internal Combustion (diesel, piston, reciprocating) Engine',
'PS': 'Energy Storage, Reversible Hydraulic Turbine (Pumped Storage)',
'OT': 'Other',
'ST': 'Steam Turbine. Including Nuclear, Geothermal, and Solar Steam (does not include Combined Cycle).',
'PV': 'Photovoltaic',
'WT': 'Wind Turbine, Onshore',
'WS': 'Wind Turbine, Offshore'
}
"""dict: A dictionary mapping EIA 923 prime mover codes (keys) and prime mover
names / descriptions (values).
"""
# EIA 923: The fuel code reported to EIA.Two or three letter alphanumeric:
fuel_type_eia923 = {
'AB': 'Agricultural By-Products',
'ANT': 'Anthracite Coal',
'BFG': 'Blast Furnace Gas',
'BIT': 'Bituminous Coal',
'BLQ': 'Black Liquor',
'CBL': 'Coal, Blended',
'DFO': 'Distillate Fuel Oil. Including diesel, No. 1, No. 2, and No. 4 fuel oils.',
'GEO': 'Geothermal',
'JF': 'Jet Fuel',
'KER': 'Kerosene',
'LFG': 'Landfill Gas',
'LIG': 'Lignite Coal',
'MSB': 'Biogenic Municipal Solid Waste',
'MSN': 'Non-biogenic Municipal Solid Waste',
'MSW': 'Municipal Solid Waste',
'MWH': 'Electricity used for energy storage',
'NG': 'Natural Gas',
'NUC': 'Nuclear. Including Uranium, Plutonium, and Thorium.',
'OBG': 'Other Biomass Gas. Including digester gas, methane, and other biomass gases.',
'OBL': 'Other Biomass Liquids',
'OBS': 'Other Biomass Solids',
'OG': 'Other Gas',
'OTH': 'Other Fuel',
'PC': 'Petroleum Coke',
'PG': 'Gaseous Propane',
'PUR': 'Purchased Steam',
'RC': 'Refined Coal',
'RFO': 'Residual Fuel Oil. Including No. 5 & 6 fuel oils and bunker C fuel oil.',
'SC': 'Coal-based Synfuel. Including briquettes, pellets, or extrusions, which are formed by binding materials or processes that recycle materials.',
'SGC': 'Coal-Derived Synthesis Gas',
'SGP': 'Synthesis Gas from Petroleum Coke',
'SLW': 'Sludge Waste',
'SUB': 'Subbituminous Coal',
'SUN': 'Solar',
'TDF': 'Tire-derived Fuels',
'WAT': 'Water at a Conventional Hydroelectric Turbine and water used in Wave Buoy Hydrokinetic Technology, current Hydrokinetic Technology, Tidal Hydrokinetic Technology, and Pumping Energy for Reversible (Pumped Storage) Hydroelectric Turbines.',
'WC': 'Waste/Other Coal. Including anthracite culm, bituminous gob, fine coal, lignite waste, waste coal.',
'WDL': 'Wood Waste Liquids, excluding Black Liquor. Including red liquor, sludge wood, spent sulfite liquor, and other wood-based liquids.',
'WDS': 'Wood/Wood Waste Solids. Including paper pellets, railroad ties, utility poles, wood chips, bark, and other wood waste solids.',
'WH': 'Waste Heat not directly attributed to a fuel source',
'WND': 'Wind',
'WO': 'Waste/Other Oil. Including crude oil, liquid butane, liquid propane, naphtha, oil waste, re-refined motor oil, sludge oil, tar oil, or other petroleum-based liquid wastes.'
}
"""dict: A dictionary mapping EIA 923 fuel type codes (keys) and fuel type
names / descriptions (values).
"""
# Fuel type strings for EIA 923 generator fuel table
fuel_type_eia923_gen_fuel_coal_strings = [
'ant', 'bit', 'cbl', 'lig', 'pc', 'rc', 'sc', 'sub', 'wc', ]
"""list: The list of EIA 923 Generation Fuel strings associated with coal fuel.
"""
fuel_type_eia923_gen_fuel_oil_strings = [
'dfo', 'rfo', 'wo', 'jf', 'ker', ]
"""list: The list of EIA 923 Generation Fuel strings associated with oil fuel.
"""
fuel_type_eia923_gen_fuel_gas_strings = [
'bfg', 'lfg', 'ng', 'og', 'obg', 'pg', 'sgc', 'sgp', ]
"""list: The list of EIA 923 Generation Fuel strings associated with gas fuel.
"""
fuel_type_eia923_gen_fuel_solar_strings = ['sun', ]
"""list: The list of EIA 923 Generation Fuel strings associated with solar
power.
"""
fuel_type_eia923_gen_fuel_wind_strings = ['wnd', ]
"""list: The list of EIA 923 Generation Fuel strings associated with wind
power.
"""
fuel_type_eia923_gen_fuel_hydro_strings = ['wat', ]
"""list: The list of EIA 923 Generation Fuel strings associated with hydro
power.
"""
fuel_type_eia923_gen_fuel_nuclear_strings = ['nuc', ]
"""list: The list of EIA 923 Generation Fuel strings associated with nuclear
power.
"""
fuel_type_eia923_gen_fuel_waste_strings = [
'ab', 'blq', 'msb', 'msn', 'msw', 'obl', 'obs', 'slw', 'tdf', 'wdl', 'wds']
"""list: The list of EIA 923 Generation Fuel strings associated with solid waste
fuel.
"""
fuel_type_eia923_gen_fuel_other_strings = ['geo', 'mwh', 'oth', 'pur', 'wh', ]
"""list: The list of EIA 923 Generation Fuel strings associated with geothermal
power.
"""
fuel_type_eia923_gen_fuel_simple_map = {
'coal': fuel_type_eia923_gen_fuel_coal_strings,
'oil': fuel_type_eia923_gen_fuel_oil_strings,
'gas': fuel_type_eia923_gen_fuel_gas_strings,
'solar': fuel_type_eia923_gen_fuel_solar_strings,
'wind': fuel_type_eia923_gen_fuel_wind_strings,
'hydro': fuel_type_eia923_gen_fuel_hydro_strings,
'nuclear': fuel_type_eia923_gen_fuel_nuclear_strings,
'waste': fuel_type_eia923_gen_fuel_waste_strings,
'other': fuel_type_eia923_gen_fuel_other_strings,
}
"""dict: A dictionary mapping EIA 923 Generation Fuel fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# Fuel type strings for EIA 923 boiler fuel table
fuel_type_eia923_boiler_fuel_coal_strings = [
'ant', 'bit', 'lig', 'pc', 'rc', 'sc', 'sub', 'wc', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
coal.
"""
fuel_type_eia923_boiler_fuel_oil_strings = ['dfo', 'rfo', 'wo', 'jf', 'ker', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
oil.
"""
fuel_type_eia923_boiler_fuel_gas_strings = [
'bfg', 'lfg', 'ng', 'og', 'obg', 'pg', 'sgc', 'sgp', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
gas.
"""
fuel_type_eia923_boiler_fuel_waste_strings = [
'ab', 'blq', 'msb', 'msn', 'obl', 'obs', 'slw', 'tdf', 'wdl', 'wds', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
waste.
"""
fuel_type_eia923_boiler_fuel_other_strings = ['oth', 'pur', 'wh', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
other.
"""
fuel_type_eia923_boiler_fuel_simple_map = {
'coal': fuel_type_eia923_boiler_fuel_coal_strings,
'oil': fuel_type_eia923_boiler_fuel_oil_strings,
'gas': fuel_type_eia923_boiler_fuel_gas_strings,
'waste': fuel_type_eia923_boiler_fuel_waste_strings,
'other': fuel_type_eia923_boiler_fuel_other_strings,
}
"""dict: A dictionary mapping EIA 923 Boiler Fuel fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# PUDL consolidation of EIA923 AER fuel type strings into same categories as
# 'energy_source_eia923' plus additional renewable and nuclear categories.
# These classifications are not currently used, as the EIA fuel type and energy
# source designations provide more detailed information.
aer_coal_strings = ['col', 'woc', 'pc']
"""list: A list of EIA 923 AER fuel type strings associated with coal.
"""
aer_gas_strings = ['mlg', 'ng', 'oog']
"""list: A list of EIA 923 AER fuel type strings associated with gas.
"""
aer_oil_strings = ['dfo', 'rfo', 'woo']
"""list: A list of EIA 923 AER fuel type strings associated with oil.
"""
aer_solar_strings = ['sun']
"""list: A list of EIA 923 AER fuel type strings associated with solar power.
"""
aer_wind_strings = ['wnd']
"""list: A list of EIA 923 AER fuel type strings associated with wind power.
"""
aer_hydro_strings = ['hps', 'hyc']
"""list: A list of EIA 923 AER fuel type strings associated with hydro power.
"""
aer_nuclear_strings = ['nuc']
"""list: A list of EIA 923 AER fuel type strings associated with nuclear power.
"""
aer_waste_strings = ['www']
"""list: A list of EIA 923 AER fuel type strings associated with waste.
"""
aer_other_strings = ['geo', 'orw', 'oth']
"""list: A list of EIA 923 AER fuel type strings associated with other fuel.
"""
aer_fuel_type_strings = {
'coal': aer_coal_strings,
'gas': aer_gas_strings,
'oil': aer_oil_strings,
'solar': aer_solar_strings,
'wind': aer_wind_strings,
'hydro': aer_hydro_strings,
'nuclear': aer_nuclear_strings,
'waste': aer_waste_strings,
'other': aer_other_strings
}
"""dict: A dictionary mapping EIA 923 AER fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# EIA 923: A partial aggregation of the reported fuel type codes into
# larger categories used by EIA in, for example,
# the Annual Energy Review (AER).Two or three letter alphanumeric.
# See the Fuel Code table (Table 5), below:
fuel_type_aer_eia923 = {
'SUN': 'Solar PV and thermal',
'COL': 'Coal',
'DFO': 'Distillate Petroleum',
'GEO': 'Geothermal',
'HPS': 'Hydroelectric Pumped Storage',
'HYC': 'Hydroelectric Conventional',
'MLG': 'Biogenic Municipal Solid Waste and Landfill Gas',
'NG': 'Natural Gas',
'NUC': 'Nuclear',
'OOG': 'Other Gases',
'ORW': 'Other Renewables',
'OTH': 'Other (including nonbiogenic MSW)',
'PC': 'Petroleum Coke',
'RFO': 'Residual Petroleum',
'WND': 'Wind',
'WOC': 'Waste Coal',
'WOO': 'Waste Oil',
'WWW': 'Wood and Wood Waste'
}
"""dict: A dictionary mapping EIA 923 AER fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
fuel_type_eia860_coal_strings = ['ant', 'bit', 'cbl', 'lig', 'pc', 'rc', 'sc',
'sub', 'wc', 'coal', 'petroleum coke', 'col',
'woc']
"""list: A list of strings from EIA 860 associated with fuel type coal.
"""
fuel_type_eia860_oil_strings = ['dfo', 'jf', 'ker', 'rfo', 'wo', 'woo',
'petroleum']
"""list: A list of strings from EIA 860 associated with fuel type oil.
"""
fuel_type_eia860_gas_strings = ['bfg', 'lfg', 'mlg', 'ng', 'obg', 'og', 'pg',
'sgc', 'sgp', 'natural gas', 'other gas',
'oog', 'sg']
"""list: A list of strings from EIA 860 associated with fuel type gas.
"""
fuel_type_eia860_solar_strings = ['sun', 'solar']
"""list: A list of strings from EIA 860 associated with solar power.
"""
fuel_type_eia860_wind_strings = ['wnd', 'wind', 'wt']
"""list: A list of strings from EIA 860 associated with wind power.
"""
fuel_type_eia860_hydro_strings = ['wat', 'hyc', 'hps', 'hydro']
"""list: A list of strings from EIA 860 associated with hydro power.
"""
fuel_type_eia860_nuclear_strings = ['nuc', 'nuclear']
"""list: A list of strings from EIA 860 associated with nuclear power.
"""
fuel_type_eia860_waste_strings = ['ab', 'blq', 'bm', 'msb', 'msn', 'obl',
'obs', 'slw', 'tdf', 'wdl', 'wds', 'biomass',
'msw', 'www']
"""list: A list of strings from EIA 860 associated with fuel type waste.
"""
fuel_type_eia860_other_strings = ['mwh', 'oth', 'pur', 'wh', 'geo', 'none',
'orw', 'other']
"""list: A list of strings from EIA 860 associated with fuel type other.
"""
fuel_type_eia860_simple_map = {
'coal': fuel_type_eia860_coal_strings,
'oil': fuel_type_eia860_oil_strings,
'gas': fuel_type_eia860_gas_strings,
'solar': fuel_type_eia860_solar_strings,
'wind': fuel_type_eia860_wind_strings,
'hydro': fuel_type_eia860_hydro_strings,
'nuclear': fuel_type_eia860_nuclear_strings,
'waste': fuel_type_eia860_waste_strings,
'other': fuel_type_eia860_other_strings,
}
"""dict: A dictionary mapping EIA 860 fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# EIA 923/860: Lumping of energy source categories.
energy_source_eia_simple_map = {
'coal': ['ANT', 'BIT', 'LIG', 'PC', 'SUB', 'WC', 'RC'],
'oil': ['DFO', 'JF', 'KER', 'RFO', 'WO'],
'gas': ['BFG', 'LFG', 'NG', 'OBG', 'OG', 'PG', 'SG', 'SGC', 'SGP'],
'solar': ['SUN'],
'wind': ['WND'],
'hydro': ['WAT'],
'nuclear': ['NUC'],
'waste': ['AB', 'BLQ', 'MSW', 'OBL', 'OBS', 'SLW', 'TDF', 'WDL', 'WDS'],
'other': ['GEO', 'MWH', 'OTH', 'PUR', 'WH']
}
"""dict: A dictionary mapping EIA fuel types (keys) to fuel codes (values).
"""
fuel_group_eia923_simple_map = {
'coal': ['coal', 'petroleum coke'],
'oil': ['petroleum'],
'gas': ['natural gas', 'other gas']
}
"""dict: A dictionary mapping EIA 923 simple fuel types("oil", "coal", "gas")
(keys) to fuel types (values).
"""
# EIA 923: The type of physical units fuel consumption is reported in.
# All consumption is reported in either short tons for solids,
# thousands of cubic feet for gases, and barrels for liquids.
fuel_units_eia923 = {
'mcf': 'Thousands of cubic feet (for gases)',
'short_tons': 'Short tons (for solids)',
'barrels': 'Barrels (for liquids)'
}
"""dict: A dictionary mapping EIA 923 fuel units (keys) to fuel unit
descriptions (values).
"""
# EIA 923: Designates the purchase type under which receipts occurred
# in the reporting month. One or two character alphanumeric:
contract_type_eia923 = {
'C': 'Contract - Fuel received under a purchase order or contract with a term of one year or longer. Contracts with a shorter term are considered spot purchases ',
'NC': 'New Contract - Fuel received under a purchase order or contract with duration of one year or longer, under which deliveries were first made during the reporting month',
'N': 'New Contract - see NC code. This abbreviation existed only in 2008 before being replaced by NC.',
'S': 'Spot Purchase',
'T': 'Tolling Agreement – Fuel received under a tolling agreement (bartering arrangement of fuel for generation)'
}
"""dict: A dictionary mapping EIA 923 contract codes (keys) to contract
descriptions (values) for each month in the Fuel Receipts and Costs table.
"""
# EIA 923: The fuel code associated with the fuel receipt.
# Defined on Page 7 of EIA Form 923
# Two or three character alphanumeric:
energy_source_eia923 = {
'ANT': 'Anthracite Coal',
'BFG': 'Blast Furnace Gas',
'BM': 'Biomass',
'BIT': 'Bituminous Coal',
'DFO': 'Distillate Fuel Oil. Including diesel, No. 1, No. 2, and No. 4 fuel oils.',
'JF': 'Jet Fuel',
'KER': 'Kerosene',
'LIG': 'Lignite Coal',
'NG': 'Natural Gas',
'PC': 'Petroleum Coke',
    'PG': 'Gaseous Propane',
'OG': 'Other Gas',
'RC': 'Refined Coal',
'RFO': 'Residual Fuel Oil. Including No. 5 & 6 fuel oils and bunker C fuel oil.',
'SG': 'Synthesis Gas from Petroleum Coke',
'SGP': 'Petroleum Coke Derived Synthesis Gas',
'SC': 'Coal-based Synfuel. Including briquettes, pellets, or extrusions, which are formed by binding materials or processes that recycle materials.',
'SUB': 'Subbituminous Coal',
'WC': 'Waste/Other Coal. Including anthracite culm, bituminous gob, fine coal, lignite waste, waste coal.',
    'WO': 'Waste/Other Oil. Including crude oil, liquid butane, liquid propane, naphtha, oil waste, re-refined motor oil, sludge oil, tar oil, or other petroleum-based liquid wastes.',
}
"""dict: A dictionary mapping fuel codes (keys) to fuel descriptions (values)
for each fuel receipt from the EIA 923 Fuel Receipts and Costs table.
"""
# EIA 923 Fuel Group, from Page 7 EIA Form 923
# Groups fossil fuel energy sources into the fuel groups reported in the
# Electric Power Monthly: Coal, Natural Gas, Other Gas, Petroleum, Petroleum Coke.
fuel_group_eia923 = (
'coal',
'natural_gas',
'petroleum',
'petroleum_coke',
'other_gas'
)
"""tuple: A tuple containing EIA 923 fuel groups.
"""
# EIA 923: Type of Coal Mine as defined on Page 7 of EIA Form 923
coalmine_type_eia923 = {
'P': 'Preparation Plant',
'S': 'Surface',
'U': 'Underground',
'US': 'Both an underground and surface mine with most coal extracted from underground',
'SU': 'Both an underground and surface mine with most coal extracted from surface',
}
"""dict: A dictionary mapping EIA 923 coal mine type codes (keys) to
descriptions (values).
"""
# EIA 923: State abbreviation related to coal mine location.
# Country abbreviations are also used in this category, but they are
# non-standard because of collisions with US state names. Instead of using
# the provided non-standard names, we convert to ISO-3166-1 three letter
# country codes https://en.wikipedia.org/wiki/ISO_3166-1_alpha-3
coalmine_country_eia923 = {
'AU': 'AUS', # Australia
'CL': 'COL', # Colombia
'CN': 'CAN', # Canada
'IS': 'IDN', # Indonesia
'PL': 'POL', # Poland
'RS': 'RUS', # Russia
'UK': 'GBR', # United Kingdom of Great Britain
'VZ': 'VEN', # Venezuela
'OT': 'other_country',
'IM': 'unknown'
}
"""dict: A dictionary mapping coal mine country codes (keys) to ISO-3166-1 three
letter country codes (values).
"""
# EIA 923: Mode for the longest / second longest distance.
transport_modes_eia923 = {
'RR': 'Rail: Shipments of fuel moved to consumers by rail \
(private or public/commercial). Included is coal hauled to or \
away from a railroad siding by truck if the truck did not use public\
roads.',
'RV': 'River: Shipments of fuel moved to consumers via river by barge. \
Not included are shipments to Great Lakes coal loading docks, \
tidewater piers, or coastal ports.',
'GL': 'Great Lakes: Shipments of coal moved to consumers via \
the Great Lakes. These shipments are moved via the Great Lakes \
coal loading docks, which are identified by name and location as \
follows: Conneaut Coal Storage & Transfer, Conneaut, Ohio; \
NS Coal Dock (Ashtabula Coal Dock), Ashtabula, Ohio; \
Sandusky Coal Pier, Sandusky, Ohio; Toledo Docks, Toledo, Ohio; \
KCBX Terminals Inc., Chicago, Illinois; \
Superior Midwest Energy Terminal, Superior, Wisconsin',
'TP': 'Tidewater Piers and Coastal Ports: Shipments of coal moved to \
Tidewater Piers and Coastal Ports for further shipments to consumers \
via coastal water or ocean. The Tidewater Piers and Coastal Ports \
are identified by name and location as follows: Dominion Terminal \
Associates, Newport News, Virginia; McDuffie Coal Terminal, Mobile, \
Alabama; IC Railmarine Terminal, Convent, Louisiana; \
International Marine Terminals, Myrtle Grove, Louisiana; \
Cooper/T. Smith Stevedoring Co. Inc., Darrow, Louisiana; \
Seward Terminal Inc., Seward, Alaska; Los Angeles Export Terminal, \
Inc., Los Angeles, California; Levin-Richmond Terminal Corp., \
Richmond, California; Baltimore Terminal, Baltimore, Maryland; \
Norfolk Southern Lamberts Point P-6, Norfolk, Virginia; \
Chesapeake Bay Piers, Baltimore, Maryland; Pier IX Terminal Company, \
Newport News, Virginia; Electro-Coal Transport Corp., Davant, \
Louisiana',
'WT': 'Water: Shipments of fuel moved to consumers by other waterways.',
'TR': 'Truck: Shipments of fuel moved to consumers by truck. \
Not included is fuel hauled to or away from a railroad siding by \
truck on non-public roads.',
'tr': 'Truck: Shipments of fuel moved to consumers by truck. \
Not included is fuel hauled to or away from a railroad siding by \
truck on non-public roads.',
'TC': 'Tramway/Conveyor: Shipments of fuel moved to consumers \
by tramway or conveyor.',
'SP': 'Slurry Pipeline: Shipments of coal moved to consumers \
by slurry pipeline.',
'PL': 'Pipeline: Shipments of fuel moved to consumers by pipeline'
}
"""dict: A dictionary mapping primary and secondary transportation mode codes
(keys) to descriptions (values).
"""
# We need to include all of the columns which we want to keep for either the
# entity or annual tables. The order here matters: we need to harvest the plant
# location before harvesting the location of the utilities, for example.
entities = {
'plants': [
# base cols
['plant_id_eia'],
# static cols
['balancing_authority_code_eia', 'balancing_authority_name_eia',
'city', 'county', 'ferc_cogen_status',
'ferc_exempt_wholesale_generator', 'ferc_small_power_producer',
'grid_voltage_2_kv', 'grid_voltage_3_kv', 'grid_voltage_kv',
'iso_rto_code', 'latitude', 'longitude', 'service_area',
'plant_name_eia', 'primary_purpose_naics_id',
'sector_id', 'sector_name', 'state', 'street_address', 'zip_code'],
# annual cols
['ash_impoundment', 'ash_impoundment_lined', 'ash_impoundment_status',
'datum', 'energy_storage', 'ferc_cogen_docket_no', 'water_source',
'ferc_exempt_wholesale_generator_docket_no',
'ferc_small_power_producer_docket_no',
'liquefied_natural_gas_storage',
'natural_gas_local_distribution_company', 'natural_gas_storage',
'natural_gas_pipeline_name_1', 'natural_gas_pipeline_name_2',
'natural_gas_pipeline_name_3', 'nerc_region', 'net_metering',
'pipeline_notes', 'regulatory_status_code',
'transmission_distribution_owner_id',
'transmission_distribution_owner_name',
'transmission_distribution_owner_state', 'utility_id_eia'],
# need type fixing
{},
],
'generators': [
# base cols
['plant_id_eia', 'generator_id'],
# static cols
['prime_mover_code', 'duct_burners', 'operating_date',
'topping_bottoming_code', 'solid_fuel_gasification',
'pulverized_coal_tech', 'fluidized_bed_tech', 'subcritical_tech',
'supercritical_tech', 'ultrasupercritical_tech', 'stoker_tech',
'other_combustion_tech', 'bypass_heat_recovery',
'rto_iso_lmp_node_id', 'rto_iso_location_wholesale_reporting_id',
'associated_combined_heat_power', 'original_planned_operating_date',
'operating_switch', 'previously_canceled'],
# annual cols
['capacity_mw', 'fuel_type_code_pudl', 'multiple_fuels',
'ownership_code', 'owned_by_non_utility', 'deliver_power_transgrid',
'summer_capacity_mw', 'winter_capacity_mw', 'summer_capacity_estimate',
'winter_capacity_estimate', 'minimum_load_mw', 'distributed_generation',
'technology_description', 'reactive_power_output_mvar',
'energy_source_code_1', 'energy_source_code_2',
'energy_source_code_3', 'energy_source_code_4',
'energy_source_code_5', 'energy_source_code_6',
'energy_source_1_transport_1', 'energy_source_1_transport_2',
'energy_source_1_transport_3', 'energy_source_2_transport_1',
'energy_source_2_transport_2', 'energy_source_2_transport_3',
'startup_source_code_1', 'startup_source_code_2',
'startup_source_code_3', 'startup_source_code_4',
'time_cold_shutdown_full_load_code', 'syncronized_transmission_grid',
'turbines_num', 'operational_status_code', 'operational_status',
'planned_modifications', 'planned_net_summer_capacity_uprate_mw',
'planned_net_winter_capacity_uprate_mw', 'planned_new_capacity_mw',
'planned_uprate_date', 'planned_net_summer_capacity_derate_mw',
'planned_net_winter_capacity_derate_mw', 'planned_derate_date',
'planned_new_prime_mover_code', 'planned_energy_source_code_1',
'planned_repower_date', 'other_planned_modifications',
'other_modifications_date', 'planned_retirement_date',
'carbon_capture', 'cofire_fuels', 'switch_oil_gas',
'turbines_inverters_hydrokinetics', 'nameplate_power_factor',
'uprate_derate_during_year', 'uprate_derate_completed_date',
'current_planned_operating_date', 'summer_estimated_capability_mw',
'winter_estimated_capability_mw', 'retirement_date',
'utility_id_eia', 'data_source'],
# need type fixing
{}
],
# utilities must come after plants. plant location needs to be
# removed before the utility locations are compiled
'utilities': [
# base cols
['utility_id_eia'],
# static cols
['utility_name_eia'],
# annual cols
['street_address', 'city', 'state', 'zip_code', 'entity_type',
'plants_reported_owner', 'plants_reported_operator',
'plants_reported_asset_manager', 'plants_reported_other_relationship',
'attention_line', 'address_2', 'zip_code_4',
'contact_firstname', 'contact_lastname', 'contact_title',
'contact_firstname_2', 'contact_lastname_2', 'contact_title_2',
'phone_extension_1', 'phone_extension_2', 'phone_number_1',
'phone_number_2'],
# need type fixing
{'utility_id_eia': 'int64', }, ],
'boilers': [
# base cols
['plant_id_eia', 'boiler_id'],
# static cols
['prime_mover_code'],
# annual cols
[],
# need type fixing
{},
]
}
"""dict: A dictionary containing table name strings (keys) and lists of columns
to keep for those tables (values).
"""
epacems_tables = ("hourly_emissions_epacems")
"""tuple: A tuple containing tables of EPA CEMS data to pull into PUDL.
"""
files_dict_epaipm = {
'transmission_single_epaipm': '*table_3-21*',
'transmission_joint_epaipm': '*transmission_joint_ipm*',
'load_curves_epaipm': '*table_2-2_*',
'plant_region_map_epaipm': '*needs_v6*',
}
"""dict: A dictionary of EPA IPM tables and strings that files of those tables
contain.
"""
epaipm_url_ext = {
'transmission_single_epaipm': 'table_3-21_annual_transmission_capabilities_of_u.s._model_regions_in_epa_platform_v6_-_2021.xlsx',
'load_curves_epaipm': 'table_2-2_load_duration_curves_used_in_epa_platform_v6.xlsx',
'plant_region_map_epaipm': 'needs_v6_november_2018_reference_case_0.xlsx',
}
"""dict: A dictionary of EPA IPM tables and associated URLs extensions for
downloading that table's data.
"""
epaipm_region_names = [
'ERC_PHDL', 'ERC_REST', 'ERC_FRNT', 'ERC_GWAY', 'ERC_WEST',
'FRCC', 'NENG_CT', 'NENGREST', 'NENG_ME', 'MIS_AR', 'MIS_IL',
'MIS_INKY', 'MIS_IA', 'MIS_MIDA', 'MIS_LA', 'MIS_LMI', 'MIS_MNWI',
'MIS_D_MS', 'MIS_MO', 'MIS_MAPP', 'MIS_AMSO', 'MIS_WOTA',
'MIS_WUMS', 'NY_Z_A', 'NY_Z_B', 'NY_Z_C&E', 'NY_Z_D', 'NY_Z_F',
'NY_Z_G-I', 'NY_Z_J', 'NY_Z_K', 'PJM_West', 'PJM_AP', 'PJM_ATSI',
'PJM_COMD', 'PJM_Dom', 'PJM_EMAC', 'PJM_PENE', 'PJM_SMAC',
'PJM_WMAC', 'S_C_KY', 'S_C_TVA', 'S_D_AECI', 'S_SOU', 'S_VACA',
'SPP_NEBR', 'SPP_N', 'SPP_SPS', 'SPP_WEST', 'SPP_KIAM', 'SPP_WAUE',
'WECC_AZ', 'WEC_BANC', 'WECC_CO', 'WECC_ID', 'WECC_IID',
'WEC_LADW', 'WECC_MT', 'WECC_NM', 'WEC_CALN', 'WECC_NNV',
'WECC_PNW', 'WEC_SDGE', 'WECC_SCE', 'WECC_SNV', 'WECC_UT',
'WECC_WY', 'CN_AB', 'CN_BC', 'CN_NL', 'CN_MB', 'CN_NB', 'CN_NF',
'CN_NS', 'CN_ON', 'CN_PE', 'CN_PQ', 'CN_SK',
]
"""list: A list of EPA IPM region names."""
epaipm_region_aggregations = {
'PJM': [
'PJM_AP', 'PJM_ATSI', 'PJM_COMD', 'PJM_Dom',
'PJM_EMAC', 'PJM_PENE', 'PJM_SMAC', 'PJM_WMAC'
],
'NYISO': [
'NY_Z_A', 'NY_Z_B', 'NY_Z_C&E', 'NY_Z_D',
'NY_Z_F', 'NY_Z_G-I', 'NY_Z_J', 'NY_Z_K'
],
'ISONE': ['NENG_CT', 'NENGREST', 'NENG_ME'],
'MISO': [
'MIS_AR', 'MIS_IL', 'MIS_INKY', 'MIS_IA',
'MIS_MIDA', 'MIS_LA', 'MIS_LMI', 'MIS_MNWI', 'MIS_D_MS',
'MIS_MO', 'MIS_MAPP', 'MIS_AMSO', 'MIS_WOTA', 'MIS_WUMS'
],
'SPP': [
'SPP_NEBR', 'SPP_N', 'SPP_SPS', 'SPP_WEST', 'SPP_KIAM', 'SPP_WAUE'
],
'WECC_NW': [
'WECC_CO', 'WECC_ID', 'WECC_MT', 'WECC_NNV',
'WECC_PNW', 'WECC_UT', 'WECC_WY'
]
}
"""
dict: A dictionary containing EPA IPM regions (keys) and lists of their
associated abbreviations (values).
"""
epaipm_rename_dict = {
'transmission_single_epaipm': {
'From': 'region_from',
'To': 'region_to',
'Capacity TTC (MW)': 'firm_ttc_mw',
'Energy TTC (MW)': 'nonfirm_ttc_mw',
'Transmission Tariff (2016 mills/kWh)': 'tariff_mills_kwh',
},
'load_curves_epaipm': {
'day': 'day_of_year',
'region': 'region_id_epaipm',
},
'plant_region_map_epaipm': {
'ORIS Plant Code': 'plant_id_eia',
'Region Name': 'region',
},
}
"""
dict: A dictionary of dictionaries containing EPA IPM tables (keys) and the
columns to be renamed in each table along with their replacement names (values).
"""
glue_pudl_tables = ('plants_eia', 'plants_ferc', 'plants', 'utilities_eia',
                    'utilities_ferc', 'utilities', 'utility_plant_assn')
"""tuple: A tuple of the glue tables that tie together the different data sources
pulled into PUDL.
"""
data_sources = (
'eia860',
'eia861',
'eia923',
'epacems',
'epaipm',
'ferc1',
'ferc714',
# 'pudl'
)
"""tuple: A tuple containing the data sources we are able to pull into PUDL."""
# All the years for which we ought to be able to download these data sources
data_years = {
'eia860': tuple(range(2001, 2020)),
'eia861': tuple(range(1990, 2020)),
'eia923': tuple(range(2001, 2020)),
'epacems': tuple(range(1995, 2021)),
'epaipm': (None, ),
'ferc1': tuple(range(1994, 2020)),
'ferc714': (None, ),
}
"""
dict: A dictionary of data sources (keys) and tuples containing the years
that we expect to be able to download for each data source (values).
"""
# The full set of years we currently expect to be able to ingest, per source:
working_partitions = {
'eia860': {
'years': tuple(range(2004, 2020))
},
'eia860m': {
'year_month': '2020-11'
},
'eia861': {
'years': tuple(range(2001, 2020))
},
'eia923': {
'years': tuple(range(2001, 2020))
},
'epacems': {
'years': tuple(range(1995, 2021)),
'states': tuple(cems_states.keys())},
'ferc1': {
'years': tuple(range(1994, 2020))
},
'ferc714': {},
}
"""
dict: A dictionary of data sources (keys) and dictionaries (values) of names of
partition type (sub-key) and paritions (sub-value) containing the paritions
such as tuples of years for each data source that are able to be ingested
into PUDL.
"""
pudl_tables = {
'eia860': eia860_pudl_tables,
'eia861': (
"service_territory_eia861",
"balancing_authority_eia861",
"sales_eia861",
"advanced_metering_infrastructure_eia861",
"demand_response_eia861",
"demand_side_management_eia861",
"distributed_generation_eia861",
"distribution_systems_eia861",
"dynamic_pricing_eia861",
"energy_efficiency_eia861",
"green_pricing_eia861",
"mergers_eia861",
"net_metering_eia861",
"non_net_metering_eia861",
"operational_data_eia861",
"reliability_eia861",
"utility_data_eia861",
),
'eia923': eia923_pudl_tables,
'epacems': epacems_tables,
'epaipm': epaipm_pudl_tables,
'ferc1': ferc1_pudl_tables,
'ferc714': (
"respondent_id_ferc714",
"id_certification_ferc714",
"gen_plants_ba_ferc714",
"demand_monthly_ba_ferc714",
"net_energy_load_ba_ferc714",
"adjacency_ba_ferc714",
"interchange_ba_ferc714",
"lambda_hourly_ba_ferc714",
"lambda_description_ferc714",
"description_pa_ferc714",
"demand_forecast_pa_ferc714",
"demand_hourly_pa_ferc714",
),
'glue': glue_pudl_tables,
}
"""
dict: A dictionary containing data sources (keys) and the list of associated
tables from that datasource that can be pulled into PUDL (values).
"""
base_data_urls = {
'eia860': 'https://www.eia.gov/electricity/data/eia860',
'eia861': 'https://www.eia.gov/electricity/data/eia861/zip',
'eia923': 'https://www.eia.gov/electricity/data/eia923',
'epacems': 'ftp://newftp.epa.gov/dmdnload/emissions/hourly/monthly',
'ferc1': 'ftp://eforms1.ferc.gov/f1allyears',
'ferc714': 'https://www.ferc.gov/docs-filing/forms/form-714/data',
'ferceqr': 'ftp://eqrdownload.ferc.gov/DownloadRepositoryProd/BulkNew/CSV',
'msha': 'https://arlweb.msha.gov/OpenGovernmentData/DataSets',
'epaipm': 'https://www.epa.gov/sites/production/files/2019-03',
'pudl': 'https://catalyst.coop/pudl/'
}
"""
dict: A dictionary containing data sources (keys) and their base data URLs
(values).
"""
need_fix_inting = {
'plants_steam_ferc1': ('construction_year', 'installation_year'),
'plants_small_ferc1': ('construction_year', 'ferc_license_id'),
'plants_hydro_ferc1': ('construction_year', 'installation_year',),
'plants_pumped_storage_ferc1': ('construction_year', 'installation_year',),
'hourly_emissions_epacems': ('facility_id', 'unit_id_epa',),
}
"""
dict: A dictionary containing tables (keys) and column names (values)
containing integer - type columns whose null values need fixing.
"""
contributors = {
"catalyst-cooperative": {
"title": "Catalyst Cooperative",
"path": "https://catalyst.coop/",
"role": "publisher",
"email": "<EMAIL>",
"organization": "Catalyst Cooperative",
},
"zane-selvans": {
"title": "<NAME>",
"email": "<EMAIL>",
"path": "https://amateurearthling.org/",
"role": "wrangler",
"organization": "Catalyst Cooperative"
},
"christina-gosnell": {
"title": "<NAME>",
"email": "<EMAIL>",
"role": "contributor",
"organization": "Catalyst Cooperative",
},
"steven-winter": {
"title": "<NAME>",
"email": "<EMAIL>",
"role": "contributor",
"organization": "Catalyst Cooperative",
},
"alana-wilson": {
"title": "<NAME>",
"email": "<EMAIL>",
"role": "contributor",
"organization": "Catalyst Cooperative",
},
"karl-dunkle-werner": {
"title": "<NAME>",
"email": "<EMAIL>",
"path": "https://karldw.org/",
"role": "contributor",
"organization": "UC Berkeley",
},
'greg-schivley': {
"title": "<NAME>",
"role": "contributor",
},
}
"""
dict: A dictionary of dictionaries containing organization names (keys) and
their attributes (values).
"""
data_source_info = {
"eia860": {
"title": "EIA Form 860",
"path": "https://www.eia.gov/electricity/data/eia860/",
},
"eia861": {
"title": "EIA Form 861",
"path": "https://www.eia.gov/electricity/data/eia861/",
},
"eia923": {
"title": "EIA Form 923",
"path": "https://www.eia.gov/electricity/data/eia923/",
},
"eiawater": {
"title": "EIA Water Use for Power",
"path": "https://www.eia.gov/electricity/data/water/",
},
"epacems": {
"title": "EPA Air Markets Program Data",
"path": "https://ampd.epa.gov/ampd/",
},
"epaipm": {
"title": "EPA Integrated Planning Model",
"path": "https://www.epa.gov/airmarkets/national-electric-energy-data-system-needs-v6",
},
"ferc1": {
"title": "FERC Form 1",
"path": "https://www.ferc.gov/docs-filing/forms/form-1/data.asp",
},
"ferc714": {
"title": "FERC Form 714",
"path": "https://www.ferc.gov/docs-filing/forms/form-714/data.asp",
},
"ferceqr": {
"title": "FERC Electric Quarterly Report",
"path": "https://www.ferc.gov/docs-filing/eqr.asp",
},
"msha": {
"title": "Mining Safety and Health Administration",
"path": "https://www.msha.gov/mine-data-retrieval-system",
},
"phmsa": {
"title": "Pipelines and Hazardous Materials Safety Administration",
"path": "https://www.phmsa.dot.gov/data-and-statistics/pipeline/data-and-statistics-overview",
},
"pudl": {
"title": "The Public Utility Data Liberation Project (PUDL)",
"path": "https://catalyst.coop/pudl/",
"email": "<EMAIL>",
},
}
"""
dict: A dictionary of dictionaries containing datasources (keys) and
associated attributes (values)
"""
contributors_by_source = {
"pudl": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
"alana-wilson",
"karl-dunkle-werner",
],
"eia923": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
],
"eia860": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
"alana-wilson",
],
"ferc1": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
"alana-wilson",
],
"epacems": [
"catalyst-cooperative",
"karl-dunkle-werner",
"zane-selvans",
],
"epaipm": [
"greg-schivley",
],
}
"""
dict: A dictionary of data sources (keys) and lists of contributors (values).
"""
licenses = {
"cc-by-4.0": {
"name": "CC-BY-4.0",
"title": "Creative Commons Attribution 4.0",
"path": "https://creativecommons.org/licenses/by/4.0/"
},
"us-govt": {
"name": "other-pd",
"title": "U.S. Government Work",
"path": "http://www.usa.gov/publicdomain/label/1.0/",
}
}
"""
dict: A dictionary of dictionaries containing license types and their
attributes.
"""
output_formats = [
'sqlite',
'parquet',
'datapkg',
]
"""list: A list of types of PUDL output formats."""
keywords_by_data_source = {
'pudl': [
'us', 'electricity',
],
'eia860': [
'electricity', 'electric', 'boiler', 'generator', 'plant', 'utility',
'fuel', 'coal', 'natural gas', 'prime mover', 'eia860', 'retirement',
'capacity', 'planned', 'proposed', 'energy', 'hydro', 'solar', 'wind',
'nuclear', 'form 860', 'eia', 'annual', 'gas', 'ownership', 'steam',
'turbine', 'combustion', 'combined cycle', 'eia',
'energy information administration'
],
'eia923': [
'fuel', 'boiler', 'generator', 'plant', 'utility', 'cost', 'price',
'natural gas', 'coal', 'eia923', 'energy', 'electricity', 'form 923',
'receipts', 'generation', 'net generation', 'monthly', 'annual', 'gas',
'fuel consumption', 'MWh', 'energy information administration', 'eia',
'mercury', 'sulfur', 'ash', 'lignite', 'bituminous', 'subbituminous',
'heat content'
],
'epacems': [
'epa', 'us', 'emissions', 'pollution', 'ghg', 'so2', 'co2', 'sox',
'nox', 'load', 'utility', 'electricity', 'plant', 'generator', 'unit',
'generation', 'capacity', 'output', 'power', 'heat content', 'mmbtu',
        'steam', 'cems', 'continuous emissions monitoring system', 'hourly',
        'environmental protection agency', 'ampd', 'air markets program data',
],
'ferc1': [
'electricity', 'electric', 'utility', 'plant', 'steam', 'generation',
'cost', 'expense', 'price', 'heat content', 'ferc', 'form 1',
'federal energy regulatory commission', 'capital', 'accounting',
'depreciation', 'finance', 'plant in service', 'hydro', 'coal',
'natural gas', 'gas', 'opex', 'capex', 'accounts', 'investment',
'capacity'
],
'ferc714': [
'electricity', 'electric', 'utility', 'planning area', 'form 714',
'balancing authority', 'demand', 'system lambda', 'ferc',
'federal energy regulatory commission', "hourly", "generation",
"interchange", "forecast", "load", "adjacency", "plants",
],
'epaipm': [
'epaipm', 'integrated planning',
]
}
"""dict: A dictionary of datasets (keys) and keywords (values). """
ENTITY_TYPE_DICT = {
'M': 'Municipal',
'C': 'Cooperative',
'R': 'Retail Power Marketer',
'I': 'Investor Owned',
'P': 'Political Subdivision',
'T': 'Transmission',
'S': 'State',
'W': 'Wholesale Power Marketer',
'F': 'Federal',
'A': 'Municipal Mktg Authority',
'G': 'Community Choice Aggregator',
'D': 'Nonutility DSM Administrator',
'B': 'Behind the Meter',
'Q': 'Independent Power Producer',
'IND': 'Industrial',
'COM': 'Commercial',
'PR': 'Private', # Added by AES for OD table (Arbitrary moniker)
'PO': 'Power Marketer', # Added by AES for OD table
'U': 'Unknown', # Added by AES for OD table
'O': 'Other' # Added by AES for OD table
}
# Confirm these designations -- educated guess based on the form instructions
MOMENTARY_INTERRUPTION_DEF = { # Added by AES for R table
'L': 'Less than 1 minute',
'F': 'Less than or equal to 5 minutes',
'O': 'Other',
}
# https://www.eia.gov/electricity/data/eia411/#tabs_NERC-3
RECOGNIZED_NERC_REGIONS = [
'BASN', # ASSESSMENT AREA Basin (WECC)
'CALN', # ASSESSMENT AREA California (WECC)
'CALS', # ASSESSMENT AREA California (WECC)
'DSW', # ASSESSMENT AREA Desert Southwest (WECC)
'ASCC', # Alaska
'ISONE', # ISO New England (NPCC)
'ERCOT', # lumped under TRE in 2017 Form instructions
'NORW', # ASSESSMENT AREA Northwest (WECC)
'NYISO', # ISO (NPCC)
'PJM', # RTO
'ROCK', # ASSESSMENT AREA Rockies (WECC)
'ECAR', # OLD RE Now part of RFC and SERC
'FRCC', # included in 2017 Form instructions, recently joined with SERC
'HICC', # Hawaii
'MAAC', # OLD RE Now part of RFC
'MAIN', # OLD RE Now part of SERC, RFC, MRO
'MAPP', # OLD/NEW RE Became part of MRO, resurfaced in 2010
'MRO', # RE included in 2017 Form instructions
'NPCC', # RE included in 2017 Form instructions
'RFC', # RE included in 2017 Form instructions
'SERC', # RE included in 2017 Form instructions
'SPP', # RE included in 2017 Form instructions
'TRE', # RE included in 2017 Form instructions (included ERCOT)
'WECC', # RE included in 2017 Form instructions
'WSCC', # OLD RE pre-2002 version of WECC
'MISO', # ISO unclear whether technically a regional entity, but lots of entries
'ECAR_MAAC',
'MAPP_WECC',
'RFC_SERC',
'SPP_WECC',
'MRO_WECC',
'ERCOT_SPP',
'SPP_TRE',
'ERCOT_TRE',
'MISO_TRE',
'VI', # Virgin Islands
'GU', # Guam
'PR', # Puerto Rico
'AS', # American Samoa
'UNK',
]
CUSTOMER_CLASSES = [
"commercial",
"industrial",
"direct_connection",
"other",
"residential",
"total",
"transportation"
]
TECH_CLASSES = [
    'backup',  # TODO: where is this used? Removed from the DG table because it is not a real component
'chp_cogen',
'combustion_turbine',
'fuel_cell',
'hydro',
'internal_combustion',
'other',
'pv',
'steam',
'storage_pv',
'all_storage', # need 'all' as prefix so as not to confuse with other storage category
'total',
'virtual_pv',
'wind',
]
REVENUE_CLASSES = [
'retail_sales',
'unbundled',
'delivery_customers',
'sales_for_resale',
'credits_or_adjustments',
'other',
'transmission',
'total',
]
RELIABILITY_STANDARDS = [
'ieee_standard',
'other_standard'
]
FUEL_CLASSES = [
'gas',
'oil',
'other',
'renewable',
'water',
'wind',
'wood',
]
RTO_CLASSES = [
'caiso',
'ercot',
'pjm',
'nyiso',
'spp',
'miso',
'isone',
'other'
]
ESTIMATED_OR_ACTUAL = {'E': 'estimated', 'A': 'actual'}
TRANSIT_TYPE_DICT = {
'CV': 'conveyer',
'PL': 'pipeline',
'RR': 'railroad',
'TK': 'truck',
'WA': 'water',
'UN': 'unknown',
}
"""dict: A dictionary of datasets (keys) and keywords (values). """
column_dtypes = {
"ferc1": { # Obviously this is not yet a complete list...
"construction_year": pd.Int64Dtype(),
"installation_year": pd.Int64Dtype(),
"plant_id_ferc1": pd.Int64Dtype(),
"plant_id_pudl": pd.Int64Dtype(),
"report_date": "datetime64[ns]",
"report_year": pd.Int64Dtype(),
"utility_id_ferc1": pd.Int64Dtype(),
"utility_id_pudl": pd.Int64Dtype(),
},
"ferc714": { # INCOMPLETE
"demand_mwh": float,
"demand_annual_mwh": float,
"eia_code": pd.Int64Dtype(),
"peak_demand_summer_mw": float,
"peak_demand_winter_mw": float,
"report_date": "datetime64[ns]",
"respondent_id_ferc714": pd.Int64Dtype(),
"respondent_name_ferc714": pd.StringDtype(),
"respondent_type": pd.CategoricalDtype(categories=[
"utility", "balancing_authority",
]),
"timezone": pd.CategoricalDtype(categories=[
"America/New_York", "America/Chicago", "America/Denver",
"America/Los_Angeles", "America/Anchorage", "Pacific/Honolulu"]),
"utc_datetime": "datetime64[ns]",
},
"epacems": {
'state': pd.StringDtype(),
'plant_id_eia': pd.Int64Dtype(), # Nullable Integer
'unitid': pd.StringDtype(),
'operating_datetime_utc': "datetime64[ns]",
'operating_time_hours': float,
'gross_load_mw': float,
'steam_load_1000_lbs': float,
'so2_mass_lbs': float,
'so2_mass_measurement_code': pd.StringDtype(),
'nox_rate_lbs_mmbtu': float,
'nox_rate_measurement_code': pd.StringDtype(),
'nox_mass_lbs': float,
'nox_mass_measurement_code': pd.StringDtype(),
'co2_mass_tons': float,
'co2_mass_measurement_code': pd.StringDtype(),
'heat_content_mmbtu': float,
'facility_id': pd.Int64Dtype(), # Nullable Integer
'unit_id_epa': pd.Int64Dtype(), # Nullable Integer
},
"eia": {
'actual_peak_demand_savings_mw': float, # Added by AES for DR table
'address_2': pd.StringDtype(), # Added by AES for 860 utilities table
'advanced_metering_infrastructure': pd.Int64Dtype(), # Added by AES for AMI table
# Added by AES for UD misc table
'alternative_fuel_vehicle_2_activity': pd.BooleanDtype(),
'alternative_fuel_vehicle_activity': pd.BooleanDtype(),
'annual_indirect_program_cost': float,
'annual_total_cost': float,
'ash_content_pct': float,
'ash_impoundment': pd.BooleanDtype(),
'ash_impoundment_lined': pd.BooleanDtype(),
# TODO: convert this field to more descriptive words
'ash_impoundment_status': pd.StringDtype(),
'associated_combined_heat_power': pd.BooleanDtype(),
'attention_line': pd.StringDtype(),
'automated_meter_reading': pd.Int64Dtype(), # Added by AES for AMI table
'backup_capacity_mw': float, # Added by AES for NNM & DG misc table
'balancing_authority_code_eia': pd.CategoricalDtype(),
'balancing_authority_id_eia': pd.Int64Dtype(),
'balancing_authority_name_eia': pd.StringDtype(),
'bga_source': pd.StringDtype(),
'boiler_id': pd.StringDtype(),
'bunded_activity': pd.BooleanDtype(),
'business_model': pd.CategoricalDtype(categories=[
"retail", "energy_services"]),
'buy_distribution_activity': pd.BooleanDtype(),
'buying_transmission_activity': pd.BooleanDtype(),
'bypass_heat_recovery': pd.BooleanDtype(),
'caidi_w_major_event_days_minus_loss_of_service_minutes': float,
'caidi_w_major_event_dats_minutes': float,
'caidi_wo_major_event_days_minutes': float,
'capacity_mw': float,
'carbon_capture': pd.BooleanDtype(),
'chlorine_content_ppm': float,
'circuits_with_voltage_optimization': pd.Int64Dtype(),
'city': pd.StringDtype(),
'cofire_fuels': pd.BooleanDtype(),
'consumed_by_facility_mwh': float,
'consumed_by_respondent_without_charge_mwh': float,
'contact_firstname': pd.StringDtype(),
'contact_firstname_2': pd.StringDtype(),
'contact_lastname': pd.StringDtype(),
'contact_lastname_2': pd.StringDtype(),
'contact_title': pd.StringDtype(),
'contact_title_2': pd.StringDtype(),
'contract_expiration_date': 'datetime64[ns]',
'contract_type_code': pd.StringDtype(),
'county': pd.StringDtype(),
'county_id_fips': pd.StringDtype(), # Must preserve leading zeroes
'credits_or_adjustments': float,
'critical_peak_pricing': pd.BooleanDtype(),
'critical_peak_rebate': pd.BooleanDtype(),
'current_planned_operating_date': 'datetime64[ns]',
'customers': float,
'customer_class': pd.CategoricalDtype(categories=CUSTOMER_CLASSES),
'customer_incentives_cost': float,
'customer_incentives_incremental_cost': float,
'customer_incentives_incremental_life_cycle_cost': float,
'customer_other_costs_incremental_life_cycle_cost': float,
'daily_digital_access_customers': pd.Int64Dtype(),
'data_observed': pd.BooleanDtype(),
'datum': pd.StringDtype(),
'deliver_power_transgrid': pd.BooleanDtype(),
'delivery_customers': float,
'direct_load_control_customers': pd.Int64Dtype(),
'distributed_generation': pd.BooleanDtype(),
'distributed_generation_owned_capacity_mw': float,
'distribution_activity': pd.BooleanDtype(),
'distribution_circuits': pd.Int64Dtype(),
'duct_burners': pd.BooleanDtype(),
'energy_displaced_mwh': float,
'energy_efficiency_annual_cost': float,
'energy_efficiency_annual_actual_peak_reduction_mw': float,
'energy_efficiency_annual_effects_mwh': float,
'energy_efficiency_annual_incentive_payment': float,
'energy_efficiency_incremental_actual_peak_reduction_mw': float,
'energy_efficiency_incremental_effects_mwh': float,
'energy_savings_estimates_independently_verified': pd.BooleanDtype(),
'energy_savings_independently_verified': pd.BooleanDtype(),
'energy_savings_mwh': float,
'energy_served_ami_mwh': float,
'energy_source_1_transport_1': pd.CategoricalDtype(categories=TRANSIT_TYPE_DICT.values()),
'energy_source_1_transport_2': pd.CategoricalDtype(categories=TRANSIT_TYPE_DICT.values()),
'energy_source_1_transport_3': pd.CategoricalDtype(categories=TRANSIT_TYPE_DICT.values()),
'energy_source_2_transport_1': pd.CategoricalDtype(categories=TRANSIT_TYPE_DICT.values()),
'energy_source_2_transport_2': pd.CategoricalDtype(categories=TRANSIT_TYPE_DICT.values()),
'energy_source_2_transport_3': pd.CategoricalDtype(categories=TRANSIT_TYPE_DICT.values()),
'energy_source_code': pd.StringDtype(),
'energy_source_code_1': pd.StringDtype(),
'energy_source_code_2': pd.StringDtype(),
'energy_source_code_3': pd.StringDtype(),
'energy_source_code_4': pd.StringDtype(),
'energy_source_code_5': pd.StringDtype(),
'energy_source_code_6': pd.StringDtype(),
'energy_storage': pd.BooleanDtype(),
'entity_type': pd.CategoricalDtype(categories=ENTITY_TYPE_DICT.values()),
'estimated_or_actual_capacity_data': pd.CategoricalDtype(categories=ESTIMATED_OR_ACTUAL.values()),
'estimated_or_actual_fuel_data': pd.CategoricalDtype(categories=ESTIMATED_OR_ACTUAL.values()),
'estimated_or_actual_tech_data': pd.CategoricalDtype(categories=ESTIMATED_OR_ACTUAL.values()),
'exchange_energy_delivered_mwh': float,
'exchange_energy_recieved_mwh': float,
'ferc_cogen_docket_no': pd.StringDtype(),
'ferc_cogen_status': pd.BooleanDtype(),
'ferc_exempt_wholesale_generator': pd.BooleanDtype(),
'ferc_exempt_wholesale_generator_docket_no': pd.StringDtype(),
'ferc_small_power_producer': pd.BooleanDtype(),
'ferc_small_power_producer_docket_no': pd.StringDtype(),
'fluidized_bed_tech': pd.BooleanDtype(),
'fraction_owned': float,
'fuel_class': pd.StringDtype(),
'fuel_consumed_for_electricity_mmbtu': float,
'fuel_consumed_for_electricity_units': float,
'fuel_consumed_mmbtu': float,
'fuel_consumed_units': float,
'fuel_cost_per_mmbtu': float,
'fuel_group_code': pd.StringDtype(),
'fuel_group_code_simple': pd.StringDtype(),
'fuel_mmbtu_per_unit': float,
'fuel_pct': float,
'fuel_qty_units': float,
# are fuel_type and fuel_type_code the same??
# fuel_type includes 40 code-like things.. WAT, SUN, NUC, etc.
'fuel_type': pd.StringDtype(),
# from the boiler_fuel_eia923 table, there are 30 code-like things, like NG, BIT, LIG
'fuel_type_code': pd.StringDtype(),
'fuel_type_code_aer': pd.StringDtype(),
'fuel_type_code_pudl': pd.StringDtype(),
'furnished_without_charge_mwh': float,
'generation_activity': pd.BooleanDtype(),
# this is a mix of integer-like values (2 or 5) and strings like AUGSF
'generator_id': pd.StringDtype(),
'generators_number': float,
'generators_num_less_1_mw': float,
'green_pricing_revenue': float,
'grid_voltage_2_kv': float,
'grid_voltage_3_kv': float,
'grid_voltage_kv': float,
'heat_content_mmbtu_per_unit': float,
'highest_distribution_voltage_kv': float,
'home_area_network': pd.Int64Dtype(),
'inactive_accounts_included': pd.BooleanDtype(),
'incremental_energy_savings_mwh': float,
'incremental_life_cycle_energy_savings_mwh': float,
'incremental_life_cycle_peak_reduction_mwh': float,
'incremental_peak_reduction_mw': float,
'iso_rto_code': pd.StringDtype(),
'latitude': float,
'liquefied_natural_gas_storage': pd.BooleanDtype(),
'load_management_annual_cost': float,
'load_management_annual_actual_peak_reduction_mw': float,
'load_management_annual_effects_mwh': float,
'load_management_annual_incentive_payment': float,
'load_management_annual_potential_peak_reduction_mw': float,
'load_management_incremental_actual_peak_reduction_mw': float,
'load_management_incremental_effects_mwh': float,
'load_management_incremental_potential_peak_reduction_mw': float,
'longitude': float,
'major_program_changes': pd.BooleanDtype(),
'mercury_content_ppm': float,
'merge_address': pd.StringDtype(),
'merge_city': pd.StringDtype(),
'merge_company': pd.StringDtype(),
'merge_date': 'datetime64[ns]',
'merge_state': pd.StringDtype(),
'mine_id_msha': pd.Int64Dtype(),
'mine_id_pudl': pd.Int64Dtype(),
'mine_name': pd.StringDtype(),
'mine_type_code': pd.StringDtype(),
'minimum_load_mw': float,
'moisture_content_pct': float,
'momentary_interruption_definition': pd.CategoricalDtype(categories=MOMENTARY_INTERRUPTION_DEF.values()),
'multiple_fuels': pd.BooleanDtype(),
'nameplate_power_factor': float,
'natural_gas_delivery_contract_type_code': pd.StringDtype(),
'natural_gas_local_distribution_company': pd.StringDtype(),
'natural_gas_pipeline_name_1': pd.StringDtype(),
'natural_gas_pipeline_name_2': pd.StringDtype(),
'natural_gas_pipeline_name_3': pd.StringDtype(),
'natural_gas_storage': pd.BooleanDtype(),
'natural_gas_transport_code': pd.StringDtype(),
'nerc_region': pd.CategoricalDtype(categories=RECOGNIZED_NERC_REGIONS),
'nerc_regions_of_operation': pd.CategoricalDtype(categories=RECOGNIZED_NERC_REGIONS),
'net_generation_mwh': float,
'net_metering': pd.BooleanDtype(),
'net_power_exchanged_mwh': float,
'net_wheeled_power_mwh': float,
'new_parent': pd.StringDtype(),
'non_amr_ami': pd.Int64Dtype(),
'nuclear_unit_id': pd.Int64Dtype(),
'operates_generating_plant': pd.BooleanDtype(),
'operating_date': 'datetime64[ns]',
'operating_switch': pd.StringDtype(),
# TODO: double check this for early 860 years
'operational_status': pd.StringDtype(),
'operational_status_code': pd.StringDtype(),
'original_planned_operating_date': 'datetime64[ns]',
'other': float,
'other_combustion_tech': pd.BooleanDtype(),
'other_costs': float,
'other_costs_incremental_cost': float,
'other_modifications_date': 'datetime64[ns]',
'other_planned_modifications': pd.BooleanDtype(),
'outages_recorded_automatically': pd.BooleanDtype(),
'owned_by_non_utility': pd.BooleanDtype(),
'owner_city': pd.StringDtype(),
'owner_name': pd.StringDtype(),
'owner_state': pd.StringDtype(),
'owner_street_address': pd.StringDtype(),
'owner_utility_id_eia': pd.Int64Dtype(),
'owner_zip_code': pd.StringDtype(),
# we should transition these into readable codes, not a one letter thing
'ownership_code': pd.StringDtype(),
'phone_extension_1': pd.StringDtype(),
'phone_extension_2': pd.StringDtype(),
'phone_number_1': pd.StringDtype(),
'phone_number_2': pd.StringDtype(),
'pipeline_notes': pd.StringDtype(),
'planned_derate_date': 'datetime64[ns]',
'planned_energy_source_code_1': pd.StringDtype(),
'planned_modifications': pd.BooleanDtype(),
'planned_net_summer_capacity_derate_mw': float,
'planned_net_summer_capacity_uprate_mw': float,
'planned_net_winter_capacity_derate_mw': float,
'planned_net_winter_capacity_uprate_mw': float,
'planned_new_capacity_mw': float,
'planned_new_prime_mover_code': pd.StringDtype(),
'planned_repower_date': 'datetime64[ns]',
'planned_retirement_date': 'datetime64[ns]',
'planned_uprate_date': 'datetime64[ns]',
'plant_id_eia': pd.Int64Dtype(),
'plant_id_epa': pd.Int64Dtype(),
'plant_id_pudl': pd.Int64Dtype(),
'plant_name_eia': pd.StringDtype(),
'plants_reported_asset_manager': pd.BooleanDtype(),
'plants_reported_operator': pd.BooleanDtype(),
'plants_reported_other_relationship': pd.BooleanDtype(),
'plants_reported_owner': pd.BooleanDtype(),
'point_source_unit_id_epa': pd.StringDtype(),
'potential_peak_demand_savings_mw': float,
'pulverized_coal_tech': pd.BooleanDtype(),
'previously_canceled': pd.BooleanDtype(),
'price_responsive_programes': pd.BooleanDtype(),
'price_responsiveness_customers': pd.Int64Dtype(),
'primary_transportation_mode_code': pd.StringDtype(),
'primary_purpose_naics_id': pd.Int64Dtype(),
'prime_mover_code': pd.StringDtype(),
'pv_current_flow_type': pd.CategoricalDtype(categories=['AC', 'DC']),
'reactive_power_output_mvar': float,
'real_time_pricing_program': pd.BooleanDtype(),
'rec_revenue': float,
'rec_sales_mwh': float,
'regulatory_status_code': pd.StringDtype(),
'report_date': 'datetime64[ns]',
'reported_as_another_company': pd.StringDtype(),
'retail_marketing_activity': pd.BooleanDtype(),
'retail_sales': float,
'retail_sales_mwh': float,
'retirement_date': 'datetime64[ns]',
'revenue_class': pd.CategoricalDtype(categories=REVENUE_CLASSES),
'rto_iso_lmp_node_id': pd.StringDtype(),
'rto_iso_location_wholesale_reporting_id': pd.StringDtype(),
'rtos_of_operation': pd.StringDtype(),
'saidi_w_major_event_dats_minus_loss_of_service_minutes': float,
'saidi_w_major_event_days_minutes': float,
'saidi_wo_major_event_days_minutes': float,
'saifi_w_major_event_days_customers': float,
'saifi_w_major_event_days_minus_loss_of_service_customers': float,
'saifi_wo_major_event_days_customers': float,
'sales_for_resale': float,
'sales_for_resale_mwh': float,
'sales_mwh': float,
'sales_revenue': float,
'sales_to_ultimate_consumers_mwh': float,
'secondary_transportation_mode_code': pd.StringDtype(),
'sector_id': pd.Int64Dtype(),
'sector_name': pd.StringDtype(),
'service_area': pd.StringDtype(),
'service_type': pd.CategoricalDtype(categories=[
"bundled", "energy", "delivery",
]),
'short_form': pd.BooleanDtype(),
'sold_to_utility_mwh': float,
'solid_fuel_gasification': pd.BooleanDtype(),
'data_source': pd.StringDtype(),
'standard': pd.CategoricalDtype(categories=RELIABILITY_STANDARDS),
'startup_source_code_1': pd.StringDtype(),
'startup_source_code_2': pd.StringDtype(),
'startup_source_code_3': pd.StringDtype(),
'startup_source_code_4': pd.StringDtype(),
'state': pd.StringDtype(),
'state_id_fips': pd.StringDtype(), # Must preserve leading zeroes
'street_address': pd.StringDtype(),
'stoker_tech': pd.BooleanDtype(),
'storage_capacity_mw': float,
'storage_customers': pd.Int64Dtype(),
'subcritical_tech': pd.BooleanDtype(),
'sulfur_content_pct': float,
'summer_capacity_mw': float,
'summer_capacity_estimate': pd.BooleanDtype(),
# TODO: check if there is any data pre-2016
'summer_estimated_capability_mw': float,
'summer_peak_demand_mw': float,
'supercritical_tech': pd.BooleanDtype(),
'supplier_name': pd.StringDtype(),
'switch_oil_gas': pd.BooleanDtype(),
'syncronized_transmission_grid': pd.BooleanDtype(),
# Added by AES for NM & DG tech table (might want to consider merging with another fuel label)
'tech_class': pd.CategoricalDtype(categories=TECH_CLASSES),
'technology_description': pd.StringDtype(),
'time_cold_shutdown_full_load_code': pd.StringDtype(),
'time_of_use_pricing_program': pd.BooleanDtype(),
'time_responsive_programs': pd.BooleanDtype(),
'time_responsiveness_customers': pd.Int64Dtype(),
'timezone': pd.StringDtype(),
'topping_bottoming_code': pd.StringDtype(),
'total': float,
'total_capacity_less_1_mw': float,
'total_meters': pd.Int64Dtype(),
'total_disposition_mwh': float,
'total_energy_losses_mwh': float,
'total_sources_mwh': float,
'transmission': float,
'transmission_activity': pd.BooleanDtype(),
'transmission_by_other_losses_mwh': float,
'transmission_distribution_owner_id': pd.Int64Dtype(),
'transmission_distribution_owner_name': pd.StringDtype(),
'transmission_distribution_owner_state': pd.StringDtype(),
'turbines_inverters_hydrokinetics': float,
'turbines_num': pd.Int64Dtype(), # TODO: check if any turbines show up pre-2016
'ultrasupercritical_tech': pd.BooleanDtype(),
'unbundled_revenues': float,
'unit_id_eia': pd.StringDtype(),
'unit_id_pudl': pd.Int64Dtype(),
'uprate_derate_completed_date': 'datetime64[ns]',
'uprate_derate_during_year': pd.BooleanDtype(),
'utility_id_eia': pd.Int64Dtype(),
'utility_id_pudl': pd.Int64Dtype(),
'utility_name_eia': pd.StringDtype(),
'utility_owned_capacity_mw': float, # Added by AES for NNM table
'variable_peak_pricing_program': pd.BooleanDtype(), # Added by AES for DP table
'virtual_capacity_mw': float, # Added by AES for NM table
'virtual_customers': pd.Int64Dtype(), # Added by AES for NM table
'water_heater': pd.Int64Dtype(), # Added by AES for DR table
'water_source': pd.StringDtype(),
'weighted_average_life_years': float,
'wheeled_power_delivered_mwh': float,
'wheeled_power_recieved_mwh': float,
'wholesale_marketing_activity': pd.BooleanDtype(),
'wholesale_power_purchases_mwh': float,
'winter_capacity_mw': float,
'winter_capacity_estimate': pd.BooleanDtype(),
'winter_estimated_capability_mw': float,
'winter_peak_demand_mw': float,
# 'with_med': float,
# 'with_med_minus_los': float,
# 'without_med': float,
'zip_code': pd.StringDtype(),
'zip_code_4': pd.StringDtype()
},
'depreciation': {
'utility_id_ferc1': | pd.Int64Dtype() | pandas.Int64Dtype |
# -*- coding: utf-8 -*-
"""
Library for udacity blogpost project.
Created on Fri Jan 29 15:39:24 2021
@author: KRS1BBH
"""
import pandas as pd
import os, glob
from datetime import datetime
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score, mean_squared_error
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
def coef_weights(coefficients, X_train):
'''
INPUT:
coefficients - the coefficients of the linear model
X_train - the training data, so the column names can be used
OUTPUT:
coefs_df - a dataframe holding the coefficient, estimate, and abs(estimate)
Provides a dataframe that can be used to understand the most influential coefficients
in a linear model by providing the coefficient estimates along with the name of the
variable attached to the coefficient.
'''
coefs_df = pd.DataFrame()
coefs_df['est_int'] = X_train.columns
coefs_df['coefs'] = coefficients
coefs_df['abs_coefs'] = np.abs(coefficients)
coefs_df = coefs_df.sort_values('abs_coefs', ascending=False)
return coefs_df
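# Example usage (illustrative sketch): X_train, y_train and the fitted model are
# assumed to exist in the calling notebook; only coef_weights itself is defined here.
#
#     lm_model = LinearRegression()
#     lm_model.fit(X_train, y_train)
#     coef_df = coef_weights(lm_model.coef_, X_train)
#     print(coef_df.head(10))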
def load_data(path_abnb):
"""
Parameters
----------
path_abnb : string
path to folder with airbnb data organized by city folders each containing the three files "reviews.csv, calendar.csv, and listings.csv".
Returns
-------
    listings : dataframe
        Dataframe with appended file content of all city subfolders in path_abnb. Only shared columns are kept.
    calendar : dataframe
        Dataframe with appended file content of all city subfolders in path_abnb.
    review : dataframe
        Dataframe with appended file content of all city subfolders in path_abnb.
"""
cities = os.listdir(path_abnb)
for n in range(len(cities)):
if n==0:
#read in listings and calendar files
listings=pd.read_csv(path_abnb+cities[n]+"/listings.csv")
listings['folder']=cities[n]
calendar= | pd.read_csv(path_abnb+cities[n]+"/calendar.csv") | pandas.read_csv |
# Imports: standard library
import os
import logging
from typing import Any, Set, Dict, List, Optional
# Imports: third party
import numpy as np
import pandas as pd
# Imports: first party
from definitions.edw import EDW_FILES
from definitions.icu import BEDMASTER_EXT
from ingest.icu.utils import get_files_in_directory
from ingest.icu.readers import EDWReader, BedmasterReader
from tensorize.bedmaster.bedmaster_stats import BedmasterStats
EXPECTED_FILES = []
for FILE in EDW_FILES:
EXPECTED_FILES.append(EDW_FILES[FILE]["name"])
EXPECTED_FILES.remove(EDW_FILES["adt_file"]["name"])
class PreTensorizeExplorer:
"""
Class that creates summary data for a set of Bedmaster and edw data.
It is used to organize this data and create a csv with all the
information.
"""
def __init__(
self,
bedmaster: str,
edw: str,
xref: str,
patient_csv: str,
):
self.bedmaster = bedmaster
self.edw = edw
self.xref = xref
self.reset()
self.xref_fields = [
"edw_mrns",
"bedmaster_mrns",
"common_mrns",
"cross_referenced_mrns",
"edw_csns",
"bedmaster_csns",
"common_csns",
"cross_referenced_csns",
"bedmaster_files",
"cross_referenced_bedmaster_files",
]
self.edw_fields = [
"Male",
"Female",
"Deceased",
"Alive",
"age",
"weight (lbs)",
"height (m)",
"length_stay (h)",
"transfer_in",
]
self.signal_fields = ["signal", "count", "total", "%", "source"]
self.edw_mrns, self.edw_csns, self.edw_mrns_csns = self.get_mrns_and_csns(
patient_csv,
)
def reset(self, signals: Any = None):
"""
Function to reset some parameters.
        :param signals: <List[str]> list of signals for which to calculate
               summary statistics. If no signals are specified,
               statistics for all signals are calculated.
"""
self.signals_summary: Dict[str, Dict[str, Set[int]]] = {}
self.signals = signals
self.summary = {
"Male": 0,
"Female": 0,
"Deceased": 0,
"Alive": 0,
"max_age": 0,
"min_age": np.inf,
"mean_age": 0,
"max_weight": 0,
"min_weight": np.inf,
"mean_weight": 0,
"max_height": 0,
"min_height": np.inf,
"mean_height": 0,
"max_length_stay": 0,
"min_length_stay": np.inf,
"mean_length_stay": 0,
"earliest_transfer_in": | pd.to_datetime("2200") | pandas.to_datetime |
# General imports
import numpy as np
import pandas as pd
import os, sys, gc, warnings, random, datetime, math
from sklearn import metrics
from sklearn.model_selection import train_test_split, KFold,GroupShuffleSplit
from sklearn.preprocessing import LabelEncoder
import seaborn as sns
import matplotlib.pyplot as plt
warnings.filterwarnings('ignore')
########################### Helpers
#################################################################################
## Seeder
# :seed to make all processes deterministic # type: int
def seed_everything(seed=0):
random.seed(seed)
np.random.seed(seed)
## Global frequency encoding
def frequency_encoding(df, columns, self_encoding=False):
for col in columns:
fq_encode = df[col].value_counts(dropna=False).to_dict()
if self_encoding:
df[col] = df[col].map(fq_encode)
else:
df[col+'_fq_enc'] = df[col].map(fq_encode)
return df
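# Example usage (illustrative sketch): column names below are assumptions taken from
# the IEEE fraud dataset; frequency_encoding appends '<col>_fq_enc' columns unless
# self_encoding=True, in which case the columns are replaced in place.
#
#     full_df = frequency_encoding(full_df, ['card1', 'addr1'])
#     full_df = frequency_encoding(full_df, ['ProductCD'], self_encoding=True)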
def values_normalization(dt_df, periods, columns, enc_type='both'):
for period in periods:
for col in columns:
new_col = col +'_'+ period
dt_df[col] = dt_df[col].astype(float)
temp_min = dt_df.groupby([period])[col].agg(['min']).reset_index()
temp_min.index = temp_min[period].values
temp_min = temp_min['min'].to_dict()
temp_max = dt_df.groupby([period])[col].agg(['max']).reset_index()
temp_max.index = temp_max[period].values
temp_max = temp_max['max'].to_dict()
temp_mean = dt_df.groupby([period])[col].agg(['mean']).reset_index()
temp_mean.index = temp_mean[period].values
temp_mean = temp_mean['mean'].to_dict()
temp_std = dt_df.groupby([period])[col].agg(['std']).reset_index()
temp_std.index = temp_std[period].values
temp_std = temp_std['std'].to_dict()
dt_df['temp_min'] = dt_df[period].map(temp_min)
dt_df['temp_max'] = dt_df[period].map(temp_max)
dt_df['temp_mean'] = dt_df[period].map(temp_mean)
dt_df['temp_std'] = dt_df[period].map(temp_std)
if enc_type=='both':
dt_df[new_col+'_min_max'] = (dt_df[col]-dt_df['temp_min'])/(dt_df['temp_max']-dt_df['temp_min'])
dt_df[new_col+'_std_score'] = (dt_df[col]-dt_df['temp_mean'])/(dt_df['temp_std'])
elif enc_type=='norm':
dt_df[new_col+'_std_score'] = (dt_df[col]-dt_df['temp_mean'])/(dt_df['temp_std'])
elif enc_type=='min_max':
dt_df[new_col+'_min_max'] = (dt_df[col]-dt_df['temp_min'])/(dt_df['temp_max']-dt_df['temp_min'])
del dt_df['temp_min'],dt_df['temp_max'],dt_df['temp_mean'],dt_df['temp_std']
return dt_df
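# Example usage (illustrative sketch): 'DT_M' (a transaction-month column) and
# 'TransactionAmt' are assumed column names; this would add min-max and std-score
# normalized versions of the transaction amount within each month.
#
#     full_df = values_normalization(full_df, ['DT_M'], ['TransactionAmt'])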
def get_new_columns(temp_list):
temp_list = [col for col in list(full_df) if col not in temp_list]
temp_list.sort()
temp_list2 = [col if col not in remove_features else '-' for col in temp_list ]
temp_list2.sort()
temp_list = {'New columns (including dummy)': temp_list,
'New Features': temp_list2}
temp_list = pd.DataFrame.from_dict(temp_list)
return temp_list
########################### Vars
#################################################################################
SEED = 42
seed_everything(SEED)
LOCAL_TEST = True
MAKE_TESTS = True
TARGET = 'isFraud'
########################### Model params
lgb_params = {
'objective':'binary',
'boosting_type':'gbdt',
'metric':'auc',
'n_jobs':-1,
'learning_rate':0.01,
'num_leaves': 2**8,
'max_depth':-1,
'tree_learner':'serial',
'colsample_bytree': 0.7,
'subsample_freq':1,
'subsample':0.7,
'n_estimators':80000,
'max_bin':255,
'verbose':-1,
'seed': SEED,
'early_stopping_rounds':100,
}
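# Example usage (illustrative sketch, assumes lightgbm is installed and that the
# train/validation splits already exist in the calling code):
#
#     import lightgbm as lgb
#     train_data = lgb.Dataset(X_train, label=y_train)
#     valid_data = lgb.Dataset(X_valid, label=y_valid)
#     estimator = lgb.train(lgb_params, train_data, valid_sets=[train_data, valid_data])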
########################### DATA LOAD
#################################################################################
print('Load Data')
train_df = | pd.read_pickle('../input/ieee-data-minification-private/train_transaction.pkl') | pandas.read_pickle |
#!/usr/bin/env python
#-*- coding:utf-8 -*-
import numpy as np
import pandas as pd
import scipy.ndimage
import skimage.morphology
import sklearn.mixture
class HDoG_CPU(object):
def __init__(self, width=2560, height=2160, depth=None, sigma_xy=(4.0, 6.0), sigma_z=(1.8,2.7),
radius_small=(24,3), radius_large=(100,5), min_intensity=1000, gamma=1.0):
self.width = width
self.height = height
self.depth = depth
if type(sigma_xy) in [float,int]:
self.sigma_xy = (sigma_xy, sigma_xy*1.5)
else:
self.sigma_xy = sigma_xy
if type(sigma_z) in [float,int]:
self.sigma_z = (sigma_z, sigma_z*1.5)
else:
self.sigma_z = sigma_z
if not radius_small:
self.radius_small_xy = int(self.sigma_xy[1]*4)
self.radius_small_z = int(self.sigma_z[1]*2)
else:
self.radius_small_xy = radius_small[0]
self.radius_small_z = radius_small[1]
self.size_small = (self.radius_small_z*2+1, self.radius_small_xy*2+1, self.radius_small_xy*2+1)
if not radius_large:
            self.radius_large_xy = int(self.sigma_xy[1]*30)
            self.radius_large_z = int(self.sigma_z[1]*10)
else:
self.radius_large_xy = radius_large[0]
self.radius_large_z = radius_large[1]
self.size_large = (self.radius_large_z*2+1, self.radius_large_xy*2+1, self.radius_large_xy*2+1)
self.min_intensity = min_intensity
self.gamma = gamma
self.normalizer = (self.sigma_xy[0]**(gamma*2)) * (self.sigma_z[0]**gamma)
def load_images(self, list_images, dtype=np.uint16):
imgs = []
for path in list_images:
img = np.fromfile(path, dtype=dtype).reshape(self.height, self.width)
imgs.append(img)
imgs = np.array(imgs)
self.depth = imgs.shape[0]
return imgs
def Normalize(self, src_img):
dilation_l_img = scipy.ndimage.filters.uniform_filter(
scipy.ndimage.morphology.grey_dilation(src_img, size=self.size_large, mode="nearest").astype(np.float32),
size=self.size_large, mode="constant", cval=0)
erosion_l_img = scipy.ndimage.filters.uniform_filter(
scipy.ndimage.morphology.grey_erosion(src_img, size=self.size_large, mode="nearest").astype(np.float32),
size=self.size_large, mode="constant", cval=0)
intensity = src_img.astype(np.float32)
norm_img = (intensity >= self.min_intensity) * intensity / (dilation_l_img - erosion_l_img)
return norm_img
def DoGFilter(self, src_img):
temp1 = scipy.ndimage.filters.gaussian_filter(
src_img.astype(np.float32),
sigma=(self.sigma_z[0],self.sigma_xy[0],self.sigma_xy[0]),
truncate=2.0, mode="constant", cval=0)
temp2 = scipy.ndimage.filters.gaussian_filter(
src_img.astype(np.float32),
sigma=(self.sigma_z[1],self.sigma_xy[1],self.sigma_xy[1]),
truncate=2.0, mode="constant", cval=0)
dog_img = (temp1 - temp2) * self.normalizer
return dog_img
def HessianPDFilter(self, dog_img):
Hz,Hy,Hx = np.gradient(dog_img)
Hzz,Hyz,Hxz = np.gradient(Hz)
Hyz,Hyy,Hxy = np.gradient(Hy)
Hxz,Hxy,Hxx = np.gradient(Hx)
det_img = Hxx*Hyy*Hzz + 2*Hxy*Hyz*Hxz - Hxx*Hyz*Hyz - Hyy*Hxz*Hxz - Hzz*Hxy*Hxy
pd_img = np.bitwise_and(np.bitwise_and(Hxx < 0, Hxx*Hyy-Hxy*Hxy > 0), det_img < 0)
hessian_img = np.array([Hxx,Hxy,Hxz,Hyy,Hyz,Hzz])
return pd_img, hessian_img
def ScaleResponse(self, scale_img, pd_img):
response = np.sum(scale_img*pd_img) / np.sum(pd_img)
return response
def CCL(self, pd_img):
labels_img = skimage.morphology.label(pd_img)
return labels_img
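    # Typical processing order (illustrative sketch, not part of the original class;
    # the variable names are assumptions for demonstration only):
    #
    #     hdog = HDoG_CPU(depth=32)
    #     imgs = hdog.load_images(list_of_image_paths)
    #     norm_img = hdog.Normalize(imgs)
    #     dog_img = hdog.DoGFilter(norm_img)
    #     pd_img, hessian_img = hdog.HessianPDFilter(dog_img)
    #     labels_img = hdog.CCL(pd_img)
    #     features_df = hdog.RegionalFeatures(norm_img, hessian_img, labels_img)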
def RegionalFeatures(self, norm_img, hessian_img, labels_img):
on_region = np.nonzero(labels_img)
labels_list = labels_img[on_region]
num_labels = np.max(labels_list)
# max intensity
max_normalized = scipy.ndimage.maximum(norm_img, labels=labels_img, index=range(1, num_labels+1))
# region size
ns = np.ones(len(labels_list))
region_size = np.bincount(labels_list-1, weights=ns)
# Regional Hessian Eigenvalues
HT = np.empty((6, num_labels))
for i in range(6):
HT[i] = np.bincount(labels_list-1, weights=hessian_img[i][on_region])
HT_mat = np.array([
[HT[0],HT[1],HT[2]],
[HT[1],HT[3],HT[4]],
[HT[2],HT[4],HT[5]]
]).T
eigenvals = np.linalg.eigvalsh(HT_mat)
l1,l2,l3 = eigenvals[:,0],eigenvals[:,1], eigenvals[:,2]
blobness = l3*l3 / (l1*l2) #l3/np.sqrt(l1*l2)
structureness = l1*l1 + l2*l2 + l3*l3 #np.sqrt()
# following code is needed if the label is not relabeled as 1,2,3,...
#label_values = np.array(sorted(np.unique(labels))) # including background(0)
#mp = np.arange(0,np.max(label_values)+1)
#mp[label_values] = np.arange(label_values.shape[0])
#labels_new = mp[labels]
zgrid,ygrid,xgrid = np.mgrid[0:self.depth, 0:self.height, 0:self.width]
centroid_x = np.bincount(labels_img.flatten(), weights=xgrid.flatten())[1:] / region_size#np.bincount(labels_img.flatten())[1:]
centroid_y = np.bincount(labels_img.flatten(), weights=ygrid.flatten())[1:] / region_size#np.bincount(labels_img.flatten())[1:]
centroid_z = np.bincount(labels_img.flatten(), weights=zgrid.flatten())[1:] / region_size#np.bincount(labels_img.flatten())[1:]
df = pd.DataFrame({
"index": pd.Series(np.arange(num_labels)),
"intensity": pd.Series(max_normalized),
"size": pd.Series(region_size),
"blobness":pd.Series(blobness),
"structureness":pd.Series(structureness),
"centroid_x": | pd.Series(centroid_x) | pandas.Series |
import requests
import time
from io import StringIO
import pandas as pd
import numpy as np
from tqdm import tqdm
from .financial_statement import html2db
from requests.exceptions import ConnectionError
from requests.exceptions import ReadTimeout
import time
import warnings
def requests_get(*args1, **args2):
i = 3
while i >= 0:
try:
return requests.get(*args1, **args2)
except (ConnectionError, ReadTimeout) as error:
print(error)
print('retry one more time after 60s', i, 'times left')
time.sleep(60)
i -= 1
return pd.DataFrame()
def requests_post(*args1, **args2):
i = 3
while i >= 0:
try:
return requests.post(*args1, **args2)
except (ConnectionError, ReadTimeout) as error:
print(error)
print('retry one more time after 60s', i, 'times left')
time.sleep(60)
i -= 1
return pd.DataFrame()
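# Usage note (illustrative, not part of the original script): both wrappers take
# the same arguments as requests.get / requests.post and retry up to three times
# on ConnectionError / ReadTimeout, e.g.
#   r = requests_get('https://www.twse.com.tw/exchangeReport/MI_INDEX', timeout=30)
# If every retry fails they return an empty pandas DataFrame instead of a
# Response object, so callers should check the type of the return value.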
warnings.simplefilter(action='ignore', category=FutureWarning)
def crawl_price(date):
datestr = date.strftime('%Y%m%d')
try:
r = requests_post('https://www.twse.com.tw/exchangeReport/MI_INDEX?response=csv&date=' + datestr + '&type=ALLBUT0999')
except Exception as e:
print('**WARRN: cannot get stock price at', datestr)
print(e)
return None
content = r.text.replace('=', '')
lines = content.split('\n')
lines = list(filter(lambda l:len(l.split('",')) > 10, lines))
content = "\n".join(lines)
if content == '':
return None
df = pd.read_csv(StringIO(content))
df = df.astype(str)
df = df.apply(lambda s: s.str.replace(',', ''))
    df['date'] = pd.to_datetime(date)
import unittest
import numpy as np
import pandas as pd
from src.tools.movement import movement
class MovementTest(unittest.TestCase):
@staticmethod
def test_long_only():
pd.testing.assert_series_equal(
pd.Series([np.NAN, 10, 10]),
movement(
pd.DataFrame({"longitudeE7": [0, 10, 20], "latitudeE7": [0, 0, 0]})
),
check_exact=False,
)
@staticmethod
def test_lat_only():
pd.testing.assert_series_equal(
pd.Series([np.NAN, 10, 10]),
movement(
pd.DataFrame({"longitudeE7": [0, 0, 0], "latitudeE7": [0, 10, 20]})
),
check_exact=False,
)
@staticmethod
def test_diagonal():
pd.testing.assert_series_equal(
            # reconstructed to mirror the tests above: diagonal steps of (10, 10)
            # give a distance of sqrt(200) ~= 14.1421 per step
            pd.Series([np.NAN, 14.1421, 14.1421]),
            movement(
                pd.DataFrame({"longitudeE7": [0, 10, 20], "latitudeE7": [0, 10, 20]})
            ),
            check_exact=False,
        )
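# For readers without access to src.tools.movement: a minimal sketch (an
# assumption about the expected behaviour, not the project's implementation)
# that would satisfy the tests above -- the Euclidean distance between
# consecutive rows of raw E7 coordinates, with NaN for the first row.
def movement_sketch(df: pd.DataFrame) -> pd.Series:
    return np.sqrt(df["longitudeE7"].diff() ** 2 + df["latitudeE7"].diff() ** 2)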
"""Module containing different I/O functions to load time log data, subject condition lists, questionnaire data, etc."""
import datetime
from pathlib import Path
from typing import Dict, Optional, Sequence, Tuple, Union
import numpy as np
import pandas as pd
import pytz
from nilspodlib import Dataset
from biopsykit.utils._datatype_validation_helper import _assert_file_extension, _assert_has_columns, _assert_is_dtype
from biopsykit.utils._types import path_t
from biopsykit.utils.dataframe_handling import convert_nan
from biopsykit.utils.datatype_helper import (
CodebookDataFrame,
SubjectConditionDataFrame,
SubjectConditionDict,
_CodebookDataFrame,
_SubjectConditionDataFrame,
is_codebook_dataframe,
is_subject_condition_dataframe,
is_subject_condition_dict,
)
from biopsykit.utils.exceptions import ValidationError
from biopsykit.utils.file_handling import is_excel_file
from biopsykit.utils.time import tz
__all__ = [
"load_long_format_csv",
"load_time_log",
"load_subject_condition_list",
"load_questionnaire_data",
"load_pandas_dict_excel",
"load_codebook",
"convert_time_log_datetime",
"write_pandas_dict_excel",
"write_result_dict",
]
def load_long_format_csv(file_path: path_t, index_cols: Optional[Union[str, Sequence[str]]] = None) -> pd.DataFrame:
"""Load dataframe stored as long-format from file.
Parameters
----------
file_path : :class:`~pathlib.Path` or str
path to file. Must be a csv file
index_cols : str or list of str, optional
column name (or list of such) of index columns to be used as MultiIndex in the resulting long-format
dataframe or ``None`` to use all columns except the last one as index columns.
Default: ``None``
Returns
-------
:class:`~pandas.DataFrame`
dataframe in long-format
"""
# ensure pathlib
file_path = Path(file_path)
_assert_file_extension(file_path, expected_extension=[".csv"])
data = pd.read_csv(file_path)
if index_cols is None:
index_cols = list(data.columns)[:-1]
_assert_has_columns(data, [index_cols])
return data.set_index(index_cols)
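# Example (hypothetical file "cortisol_long.csv" with the columns "subject", "phase", "cortisol"):
#   >>> data = load_long_format_csv("cortisol_long.csv", index_cols=["subject", "phase"])
# returns a dataframe indexed by ("subject", "phase") with "cortisol" as its only column;
# with index_cols=None all columns except the last one would be used as the index.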
def load_time_log(
file_path: path_t,
subject_col: Optional[str] = None,
condition_col: Optional[str] = None,
additional_index_cols: Optional[Union[str, Sequence[str]]] = None,
phase_cols: Optional[Union[Sequence[str], Dict[str, str]]] = None,
continuous_time: Optional[bool] = True,
**kwargs,
) -> pd.DataFrame:
"""Load time log information from file.
This function can be used to load a file containing "time logs", i.e.,
information about start and stop times of recordings or recording phases per subject.
Parameters
----------
file_path : :class:`~pathlib.Path` or str
path to time log file. Must either be an Excel or csv file
subject_col : str, optional
name of column containing subject IDs or ``None`` to use default column name ``subject``.
According to BioPsyKit's convention, the subject ID column is expected to have the name ``subject``.
If the subject ID column in the file has another name, the column will be renamed in the dataframe
returned by this function.
condition_col : str, optional
name of column containing condition assignments or ``None`` to use default column name ``condition``.
According to BioPsyKit's convention, the condition column is expected to have the name ``condition``.
If the condition column in the file has another name, the column will be renamed in the dataframe
returned by this function.
additional_index_cols : str, list of str, optional
additional index levels to be added to the dataframe.
        Can either be a string or a list of strings to indicate column name(s) that should be used as index level(s),
or ``None`` for no additional index levels. Default: ``None``
phase_cols : list of str or dict, optional
list of column names that contain time log information or ``None`` to use all columns.
If the column names of the time log dataframe should have different names than the columns in the file,
a dict specifying the mapping (column_name : new_column_name) can be passed. Default: ``None``
continuous_time: bool, optional
flag indicating whether phases are continuous, i.e., whether the end of the previous phase is also the
beginning of the next phase or not. Default: ``True``.
If ``continuous_time`` is set to ``False``, the start and end columns of all phases must have the
suffixes "_start" and "_end", respectively
**kwargs
Additional parameters that are passed to :func:`pandas.read_csv` or :func:`pandas.read_excel`
Returns
-------
:class:`~pandas.DataFrame`
dataframe with time log information
Raises
------
:exc:`~biopsykit.utils.exceptions.FileExtensionError`
if file format is none of [".xls", ".xlsx", ".csv"]
:exc:`~biopsykit.utils.exceptions.ValidationError`
if ``continuous_time`` is ``False``, but "start" and "end" time columns of each phase do not match or
none of these columns were found in the dataframe
Examples
--------
>>> import biopsykit as bp
>>> file_path = "./timelog.csv"
>>> # Example 1:
>>> # load time log file into a pandas dataframe
>>> data = bp.io.load_time_log(file_path)
>>> # Example 2:
>>> # load time log file into a pandas dataframe and specify the "ID" column
>>> # (instead of the default "subject" column) in the time log file to be the index of the dataframe
>>> data = bp.io.load_time_log(file_path, subject_col="ID")
>>> # Example 3:
>>> # load time log file into a pandas dataframe and specify the columns "Phase1", "Phase2", and "Phase3"
>>> # to be used for extracting time information
>>> data = bp.io.load_time_log(
>>> file_path, phase_cols=["Phase1", "Phase2", "Phase3"]
>>> )
>>> # Example 4:
>>> # load time log file into a pandas dataframe and specify the column "ID" as subject column, the column "Group"
>>> # as condition column, as well as the column "Time" as additional index column.
>>> data = bp.io.load_time_log(file_path,
>>> subject_col="ID",
>>> condition_col="Group",
>>> additional_index_cols=["Time"],
>>> phase_cols=["Phase1", "Phase2", "Phase3"]
>>> )
"""
# ensure pathlib
file_path = Path(file_path)
_assert_file_extension(file_path, expected_extension=[".xls", ".xlsx", ".csv"])
    # ensure times in the Excel sheet are imported as strings
    # and not automatically converted into datetime objects
kwargs["dtype"] = str
data = _load_dataframe(file_path, **kwargs)
data, index_cols = _sanitize_index_cols(data, subject_col, condition_col, additional_index_cols)
data = _apply_index_cols(data, index_cols=index_cols)
data = _apply_phase_cols(data, phase_cols=phase_cols)
data.columns.name = "phase"
if not continuous_time:
data = _parse_time_log_not_continuous(data, index_cols)
for val in data.values.flatten():
if val is np.nan:
continue
_assert_is_dtype(val, str)
return data
def _apply_phase_cols(data: pd.DataFrame, phase_cols: Union[Dict[str, Sequence[str]], Sequence[str]]) -> pd.DataFrame:
new_phase_cols = None
if isinstance(phase_cols, dict):
new_phase_cols = phase_cols
phase_cols = list(phase_cols.keys())
if phase_cols:
_assert_has_columns(data, [phase_cols])
data = data.loc[:, phase_cols]
if new_phase_cols:
data = data.rename(columns=new_phase_cols)
return data
def _parse_time_log_not_continuous(
data: pd.DataFrame, index_cols: Union[str, Sequence[str], Dict[str, str]]
) -> pd.DataFrame:
start_cols = np.squeeze(data.columns.str.extract(r"(\w+)_start").dropna().values)
end_cols = np.squeeze(data.columns.str.extract(r"(\w+)_end").dropna().values)
if start_cols.size == 0:
raise ValidationError(
"No 'start' and 'end' columns were found. "
"Make sure that each phase has columns with 'start' and 'end' suffixes!"
)
if not np.array_equal(start_cols, end_cols):
raise ValidationError("Not all phases have 'start' and 'end' columns!")
if index_cols is None:
index_cols = [s for s in ["subject", "condition"] if s in data.columns]
data = data.set_index(index_cols)
if isinstance(index_cols, dict):
index_cols = data.index.names
data = pd.wide_to_long(
data.reset_index(),
stubnames=start_cols,
i=index_cols,
j="time",
sep="_",
suffix="(start|end)",
)
# ensure that "start" is always before "end"
data = data.reindex(["start", "end"], level=-1)
# unstack start|end level
data = data.unstack()
# set name of outer index level
data.columns = data.columns.set_names("phase", level=0)
return data
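# Illustration (hypothetical column layout): a non-continuous time log with the columns
#   subject, Baseline_start, Baseline_end, Stress_start, Stress_end
# is reshaped by the wide_to_long() call above into a dataframe indexed by "subject"
# whose columns form a MultiIndex with an outer "phase" level and an inner start/end level:
#   phase    Baseline          Stress
#   time     start    end      start    end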
def load_subject_condition_list(
file_path: path_t,
subject_col: Optional[str] = None,
condition_col: Optional[str] = None,
return_dict: Optional[bool] = False,
**kwargs,
) -> Union[SubjectConditionDataFrame, SubjectConditionDict]:
"""Load subject condition assignment from file.
This function can be used to load a file that contains the assignment of subject IDs to study conditions.
It will return a dataframe or a dictionary that complies with BioPsyKit's naming convention, i.e.,
the subject ID index will be named ``subject`` and the condition column will be named ``condition``.
Parameters
----------
file_path : :class:`~pathlib.Path` or str
path to time log file. Must either be an Excel or csv file
subject_col : str, optional
name of column containing subject IDs or ``None`` to use default column name ``subject``.
According to BioPsyKit's convention, the subject ID column is expected to have the name ``subject``.
If the subject ID column in the file has another name, the column will be renamed in the dataframe
returned by this function.
condition_col : str, optional
name of column containing condition assignments or ``None`` to use default column name ``condition``.
According to BioPsyKit's convention, the condition column is expected to have the name ``condition``.
If the condition column in the file has another name, the column will be renamed in the dataframe
returned by this function.
return_dict : bool, optional
whether to return a dict with subject IDs per condition (``True``) or a dataframe (``False``).
Default: ``False``
**kwargs
        Additional parameters that are passed to :func:`pandas.read_csv` or :func:`pandas.read_excel`
Returns
-------
:class:`~biopsykit.utils.datatype_helper.SubjectConditionDataFrame` or
:class:`~biopsykit.utils.datatype_helper.SubjectConditionDict`
a standardized pandas dataframe with subject IDs and condition assignments (if ``return_dict`` is ``False``) or
a standardized dict with subject IDs per group (if ``return_dict`` is ``True``)
Raises
------
:exc:`~biopsykit.utils.exceptions.FileExtensionError`
if file is not a csv or Excel file
:exc:`~biopsykit.utils.exceptions.ValidationError`
if result is not a :class:`~biopsykit.utils.datatype_helper.SubjectConditionDataFrame` or a
:class:`~biopsykit.utils.datatype_helper.SubjectConditionDict`
"""
# ensure pathlib
file_path = Path(file_path)
_assert_file_extension(file_path, expected_extension=[".xls", ".xlsx", ".csv"])
data = _load_dataframe(file_path, **kwargs)
if subject_col is None:
subject_col = "subject"
if condition_col is None:
condition_col = "condition"
_assert_has_columns(data, [[subject_col, condition_col]])
if subject_col != "subject":
# rename column
subject_col = {subject_col: "subject"}
data = data.rename(columns=subject_col)
subject_col = "subject"
if condition_col != "condition":
# rename column
condition_col = {condition_col: "condition"}
data = data.rename(columns=condition_col)
condition_col = "condition"
data = data.set_index(subject_col)
if return_dict:
data = data.groupby(condition_col).groups
is_subject_condition_dict(data)
return data
is_subject_condition_dataframe(data)
return _SubjectConditionDataFrame(data)
def _get_subject_col(data: pd.DataFrame, subject_col: str):
if subject_col is None:
subject_col = "subject"
_assert_is_dtype(subject_col, str)
_assert_has_columns(data, [[subject_col]])
return subject_col
def _sanitize_index_cols(
data: pd.DataFrame,
subject_col: str,
condition_col: Optional[str],
additional_index_cols: Optional[Union[str, Sequence[str]]],
) -> Tuple[pd.DataFrame, Sequence[str]]:
subject_col = _get_subject_col(data, subject_col)
data = data.rename(columns={subject_col: "subject"})
subject_col = "subject"
index_cols = [subject_col]
if condition_col is not None:
_assert_is_dtype(condition_col, str)
_assert_has_columns(data, [[condition_col]])
data = data.rename(columns={condition_col: "condition"})
condition_col = "condition"
index_cols.append(condition_col)
elif "condition" in data.columns:
index_cols.append("condition")
if additional_index_cols is None:
additional_index_cols = []
if isinstance(additional_index_cols, str):
additional_index_cols = [additional_index_cols]
index_cols = index_cols + additional_index_cols
return data, index_cols
def load_questionnaire_data(
file_path: path_t,
subject_col: Optional[str] = None,
condition_col: Optional[str] = None,
additional_index_cols: Optional[Union[str, Sequence[str]]] = None,
replace_missing_vals: Optional[bool] = True,
remove_nan_rows: Optional[bool] = True,
sheet_name: Optional[Union[str, int]] = 0,
**kwargs,
) -> pd.DataFrame:
"""Load questionnaire data from file.
The resulting dataframe will comply with BioPsyKit's naming conventions, i.e., the subject ID index will be
named ``subject`` and a potential condition index will be named ``condition``.
Parameters
----------
file_path : :class:`~pathlib.Path` or str
path to time log file. Must either be an Excel or csv file
subject_col : str, optional
name of column containing subject IDs or ``None`` to use default column name ``subject``.
According to BioPsyKit's convention, the subject ID column is expected to have the name ``subject``.
If the subject ID column in the file has another name, the column will be renamed in the dataframe
returned by this function.
condition_col : str, optional
name of column containing condition assignments or ``None`` to use default column name ``condition``.
According to BioPsyKit's convention, the condition column is expected to have the name ``condition``.
If the condition column in the file has another name, the column will be renamed in the dataframe
returned by this function.
additional_index_cols : str, list of str, optional
additional index levels to be added to the dataframe.
        Can either be a string or a list of strings to indicate column name(s) that should be used as index level(s),
or ``None`` for no additional index levels. Default: ``None``
replace_missing_vals : bool, optional
``True`` to replace encoded "missing values" from software like SPSS (e.g. -77, -99, or -66)
to "actual" missing values (NaN).
Default: ``True``
remove_nan_rows : bool, optional
``True`` to remove rows that only contain NaN values (except the index cols), ``False`` to keep NaN rows.
Default: ``True``
sheet_name : str or int, optional
        sheet name identifier (str) or sheet index (int) if the file is an Excel file.
Default: 0 (i.e. first sheet in Excel file)
Returns
-------
:class:`~pandas.DataFrame`
dataframe with imported questionnaire data
Raises
------
:class:`~biopsykit.utils.exceptions.FileExtensionError`
if file format is none of [".xls", ".xlsx", ".csv"]
"""
# ensure pathlib
file_path = Path(file_path)
_assert_file_extension(file_path, expected_extension=[".xls", ".xlsx", ".csv"])
if file_path.suffix != ".csv":
kwargs["sheet_name"] = sheet_name
data = _load_dataframe(file_path, **kwargs)
data, index_cols = _sanitize_index_cols(data, subject_col, condition_col, additional_index_cols)
data = _apply_index_cols(data, index_cols=index_cols)
if replace_missing_vals:
data = convert_nan(data)
if remove_nan_rows:
data = data.dropna(how="all")
return data
def load_codebook(file_path: path_t, **kwargs) -> CodebookDataFrame:
"""Load codebook from file.
A codebook is used to convert numerical values from a dataframe (e.g., from questionnaire data)
to categorical values.
Parameters
----------
file_path : :class:`~pathlib.Path` or str
file path to codebook
**kwargs
additional arguments to pass to :func:`pandas.read_csv` or :func:`pandas.read_excel`
Returns
-------
:class:`~pandas.DataFrame`
:obj:`~biopsykit.utils.datatype_helper.CodebookDataFrame`, a dataframe in a standardized format
See Also
--------
:func:`~biopsykit.utils.dataframe_handling.apply_codebook`
apply codebook to data
"""
# ensure pathlib
file_path = Path(file_path)
_assert_file_extension(file_path, expected_extension=[".xls", ".xlsx", ".csv"])
if file_path.suffix in [".xls", ".xlsx"]:
data = pd.read_excel(file_path, **kwargs)
else:
        data = pd.read_csv(file_path, **kwargs)
# -*- coding: utf-8 -*-
"""
Created on Thu May 24 17:46:53 2018
@author: SilverDoe
"""
import pandas as pd
data = [1,2,3,4,5]
df = pd.DataFrame(data)
print(df)
import pandas as pd
data = [['Alex',10],['Bob',12],['Clarke',13]]
df = pd.DataFrame(data,columns=['Name','Age'])
print(df)
import pandas as pd
data = [['Alex',10],['Bob',12],['Clarke',13]]
df = pd.DataFrame(data,columns=['Name','Age'],dtype=float)
print(df)
# Create a DataFrame from Dict of ndarrays / Lists
'''
All the ndarrays must be of the same length. If an index is passed,
then the length of the index should equal the length of the arrays.
If no index is passed, then by default the index will be range(n), where n is the array length.
'''
import pandas as pd
data = {'Name':['Tom', 'Jack', 'Steve', 'Ricky'],'Age':[28,34,29,42]}
df = pd.DataFrame(data)
print(df)
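# Additional example (added for illustration): the same dict of lists, but with an
# explicit index. As noted above, the index length must equal the length of the arrays.
import pandas as pd
data = {'Name':['Tom', 'Jack', 'Steve', 'Ricky'],'Age':[28,34,29,42]}
df = pd.DataFrame(data, index=['rank1','rank2','rank3','rank4'])
print(df)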
from __future__ import division
from datetime import datetime
import sys
if sys.version_info < (3, 3):
import mock
else:
from unittest import mock
import pandas as pd
import numpy as np
import random
from nose.tools import assert_almost_equal as aae
import bt
import bt.algos as algos
def test_algo_name():
class TestAlgo(algos.Algo):
pass
actual = TestAlgo()
assert actual.name == 'TestAlgo'
class DummyAlgo(algos.Algo):
def __init__(self, return_value=True):
self.return_value = return_value
self.called = False
def __call__(self, target):
self.called = True
return self.return_value
def test_algo_stack():
algo1 = DummyAlgo(return_value=True)
algo2 = DummyAlgo(return_value=False)
algo3 = DummyAlgo(return_value=True)
target = mock.MagicMock()
stack = bt.AlgoStack(algo1, algo2, algo3)
actual = stack(target)
assert not actual
assert algo1.called
assert algo2.called
assert not algo3.called
def test_print_temp_data():
target = mock.MagicMock()
target.temp={}
target.temp['selected'] = ['c1','c2']
target.temp['weights'] = [0.5,0.5]
algo = algos.PrintTempData()
assert algo( target )
algo = algos.PrintTempData( 'Selected: {selected}')
assert algo( target )
def test_print_info():
target = bt.Strategy('s', [])
target.temp={}
algo = algos.PrintInfo()
assert algo( target )
algo = algos.PrintInfo( '{now}: {name}')
assert algo( target )
def test_run_once():
algo = algos.RunOnce()
assert algo(None)
assert not algo(None)
assert not algo(None)
def test_run_period():
target = mock.MagicMock()
dts = pd.date_range('2010-01-01', periods=35)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
algo = algos.RunPeriod()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
dts = target.data.index
target.now = None
assert not algo(target)
# run on first date
target.now = dts[0]
assert not algo(target)
# run on first supplied date
target.now = dts[1]
assert algo(target)
# run on last date
target.now = dts[len(dts) - 1]
assert not algo(target)
algo = algos.RunPeriod(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
dts = target.data.index
# run on first date
target.now = dts[0]
assert not algo(target)
# first supplied date
target.now = dts[1]
assert not algo(target)
# run on last date
target.now = dts[len(dts) - 1]
assert algo(target)
# date not in index
target.now = datetime(2009, 2, 15)
assert not algo(target)
def test_run_daily():
target = mock.MagicMock()
dts = pd.date_range('2010-01-01', periods=35)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
algo = algos.RunDaily()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('',[algo]),
data
)
target.data = backtest.data
target.now = dts[1]
assert algo(target)
def test_run_weekly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunWeekly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of week
target.now = dts[2]
assert not algo(target)
# new week
target.now = dts[3]
assert algo(target)
algo = algos.RunWeekly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of week
target.now = dts[2]
assert algo(target)
# new week
target.now = dts[3]
assert not algo(target)
dts = pd.DatetimeIndex([datetime(2016, 1, 3), datetime(2017, 1, 8),datetime(2018, 1, 7)])
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# check next year
target.now = dts[1]
assert algo(target)
def test_run_monthly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunMonthly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of month
target.now = dts[30]
assert not algo(target)
# new month
target.now = dts[31]
assert algo(target)
algo = algos.RunMonthly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of month
target.now = dts[30]
assert algo(target)
# new month
target.now = dts[31]
assert not algo(target)
dts = pd.DatetimeIndex([datetime(2016, 1, 3), datetime(2017, 1, 8), datetime(2018, 1, 7)])
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# check next year
target.now = dts[1]
assert algo(target)
def test_run_quarterly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunQuarterly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of quarter
target.now = dts[89]
assert not algo(target)
# new quarter
target.now = dts[90]
assert algo(target)
algo = algos.RunQuarterly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of quarter
target.now = dts[89]
assert algo(target)
# new quarter
target.now = dts[90]
assert not algo(target)
dts = pd.DatetimeIndex([datetime(2016, 1, 3), datetime(2017, 1, 8), datetime(2018, 1, 7)])
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# check next year
target.now = dts[1]
assert algo(target)
def test_run_yearly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunYearly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of year
target.now = dts[364]
assert not algo(target)
# new year
target.now = dts[365]
assert algo(target)
algo = algos.RunYearly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of year
target.now = dts[364]
assert algo(target)
# new year
target.now = dts[365]
assert not algo(target)
def test_run_on_date():
target = mock.MagicMock()
target.now = pd.to_datetime('2010-01-01')
algo = algos.RunOnDate('2010-01-01', '2010-01-02')
assert algo(target)
target.now = pd.to_datetime('2010-01-02')
assert algo(target)
target.now = pd.to_datetime('2010-01-03')
assert not algo(target)
def test_run_if_out_of_bounds():
algo = algos.RunIfOutOfBounds(0.5)
dts = pd.date_range('2010-01-01', periods=3)
s = bt.Strategy('s')
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
s.temp['selected'] = ['c1', 'c2']
s.temp['weights'] = {'c1': .5, 'c2':.5}
s.update(dts[0])
s.children['c1'] = bt.core.SecurityBase('c1')
s.children['c2'] = bt.core.SecurityBase('c2')
s.children['c1']._weight = 0.5
s.children['c2']._weight = 0.5
assert not algo(s)
s.children['c1']._weight = 0.25
s.children['c2']._weight = 0.75
assert not algo(s)
s.children['c1']._weight = 0.24
s.children['c2']._weight = 0.76
assert algo(s)
s.children['c1']._weight = 0.75
s.children['c2']._weight = 0.25
assert not algo(s)
s.children['c1']._weight = 0.76
s.children['c2']._weight = 0.24
assert algo(s)
def test_run_after_date():
target = mock.MagicMock()
target.now = pd.to_datetime('2010-01-01')
algo = algos.RunAfterDate('2010-01-02')
assert not algo(target)
target.now = pd.to_datetime('2010-01-02')
assert not algo(target)
target.now = pd.to_datetime('2010-01-03')
assert algo(target)
def test_run_after_days():
target = mock.MagicMock()
target.now = pd.to_datetime('2010-01-01')
algo = algos.RunAfterDays(3)
assert not algo(target)
assert not algo(target)
assert not algo(target)
assert algo(target)
def test_set_notional():
algo = algos.SetNotional('notional')
s = bt.FixedIncomeStrategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
notional = pd.Series(index=dts[:2], data=[1e6, 5e6])
s.setup( data, notional = notional )
s.update(dts[0])
assert algo(s)
assert s.temp['notional_value'] == 1e6
s.update(dts[1])
assert algo(s)
assert s.temp['notional_value'] == 5e6
s.update(dts[2])
assert not algo(s)
def test_rebalance():
algo = algos.Rebalance()
s = bt.Strategy('s')
    dts = pd.date_range('2010-01-01', periods=3)
import os
import glob
import pandas as pd
game_files=glob.glob(os.path.join(os.getcwd(),'games','*.EVE'))
game_files.sort()
#print(game_files)
game_frames=[]
for game_file in game_files:
game_frame=pd.read_csv(game_file,names=['type','multi2','multi3','multi4','multi5','multi6','event'])
game_frames.append(game_frame)
games=pd.concat(game_frames)
games.loc[games['multi5'] == "??", ['multi5']] = ""
regex_function=r'(.LS(\d{4})\d{5})'
identifiers=games.multi2.str.extract(regex_function)
identifiers.fillna(method='ffill',inplace=True)
identifiers.columns=['game_id','year']
games=pd.concat([games,identifiers],axis=1,sort=False)
games.fillna(' ',inplace=True)
games.loc[:,'type'] = pd.Categorical(games.loc[:,'type'])
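# Possible next step (a sketch, not part of the original script): a quick sanity
# check counting how many event rows were parsed per game.
events_per_game = games.groupby('game_id').size()
print(events_per_game.head())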
import altair as alt
# import streamlit as st
import pandas as pd
# from sklearn.linear_model import LinearRegression
# from sklearn.model_selection import train_test_split
import numpy as np
from vega_datasets import data
import streamlit as st
from sklearn.linear_model import LinearRegression
from PIL import Image
alt.data_transformers.disable_max_rows()
st.set_page_config(page_title="Energy, Climate Change, and Global Responsibility", layout="wide", page_icon="💡")
# @st.cache
def get_energy(with_coords=True):
energy_sources = pd.read_csv('./data/primary-energy-source-bar.csv')
energy_sources = energy_sources.sort_values(['Entity', 'Year'])
for entity in energy_sources.Entity.unique():
indices = energy_sources.Entity == entity
for col in energy_sources.columns:
if col != 'Entity' and col != 'Code' and col != 'Year' and col != 'TotalEnergy':
temp = energy_sources[indices][col]
                temp = temp.fillna(method='ffill')
energy_sources.loc[indices, col] = temp
energy_sources = energy_sources[energy_sources.Year == 2019].copy()
energy_sources['TotalEnergy'] = 0
for col in energy_sources.columns:
if col != 'Entity' and col != 'Code' and col != 'Year' and col != 'TotalEnergy':
energy_sources['TotalEnergy'] += energy_sources[col]
if with_coords == False:
return energy_sources
lats = pd.read_csv('./data/average-latitude-longitude-countries.csv')
energy_sources = energy_sources[energy_sources.Entity != 'Europe']
country_lat = energy_sources.merge(lats, left_on='Entity', right_on='Country')
return country_lat
# @st.cache
def get_energypc(with_coords=True):
population = pd.read_csv('./data/population-past-future.csv')
population = population[~population.Code.isnull()]
population = population[population.Code != 'NaN']
population = population.sort_values(by=['Entity', 'Year'])
for entity in population.Entity.unique():
indices = population.Entity == entity
temp = population[indices]['Population (historical estimates and future projections)']
        temp = temp.fillna(method='ffill')
population.loc[indices, 'Population (historical estimates and future projections)'] = temp
population = population[population.Year == 2019]
population['Entity'] = population.Entity.astype('str')
population = population[population.Entity != 'Europe']
df = get_energy(with_coords)
df['Entity'] = df.Entity.astype('str')
joined = population.merge(df, on='Entity')
joined['EnergyPerCapita'] = joined['TotalEnergy']/joined['Population (historical estimates and future projections)']
if with_coords:
joined['Latitude_x'] = joined['Latitude']
joined['Longitude_x'] = joined['Longitude']
return joined
# @st.cache
def get_fuelco2():
fuel_co2 = pd.read_csv('./data/co2-emissions-by-fuel-line.csv')
fuel_co2 = fuel_co2[(fuel_co2.Year == 2019)]
co2_cols = ['Annual CO2 emissions from oil', 'Annual CO2 emissions from flaring', 'Annual CO2 emissions from cement', 'Annual CO2 emissions from coal', 'Annual CO2 emissions from gas', 'Annual CO2 emissions from other industry']
co2_rename = ['Oil', 'Flaring', 'Cement', 'Coal', 'Gas', 'Other Industries']
fuel_co2 = fuel_co2.rename(index=str, columns=dict(zip(co2_cols, co2_rename)))
fuel_co2['NetCO2'] = 0
for col in co2_rename:
fuel_co2[col] = fuel_co2[col].fillna(0)
fuel_co2['NetCO2'] += fuel_co2[col]
co2_cols = co2_rename
population = pd.read_csv('./data/population-past-future.csv')
population = population[~population.Code.isnull()]
population = population[population.Code != 'NaN']
population = population[population.Year == 2019]
population['Entity'] = population.Entity.astype('str')
population = population[population.Entity != 'Europe']
fuel_co2['Entity'] = fuel_co2.Entity.astype('str')
co2_pc = population.merge(fuel_co2, on='Entity')
co2_pc['NetCO2_PC'] = co2_pc['NetCO2']/co2_pc['Population (historical estimates and future projections)']
for col in co2_cols:
co2_pc[col + '_PC'] = co2_pc[col]/co2_pc['Population (historical estimates and future projections)']
# co2_pc.head()
world_population = 7713468203
for col in co2_cols+['NetCO2']:
co2_pc[col] = co2_pc[col+'_PC']*world_population
return co2_pc, co2_cols
# @st.cache
def get_energy_charts_circle():
url = 'https://cdn.jsdelivr.net/npm/world-atlas@2/countries-110m.json'
selector = alt.selection_single(name="selector", fields=['Entity'])
countries = alt.topo_feature(data.world_110m.url, 'countries')
geo = alt.Chart(countries).mark_geoshape(
fill='lightblue',
stroke='white').properties(
width=580,
height=500
)
country_lat = get_energy()
color_range = alt.Color('TotalEnergy:Q',
scale=alt.Scale(
type='pow', exponent=0.6,
# domain=(temp_min, temp_max),
range=['yellow', 'orange', 'red']
))
circles = alt.Chart(country_lat).mark_circle().encode(
latitude='Latitude:Q',
longitude="Longitude:Q",
size=alt.Size("TotalEnergy:Q", scale=alt.Scale(range=[0, 3000]), legend=None),
tooltip=['Entity'],
color=alt.condition(selector, color_range, alt.value('lightgreen'))
).add_selection(selector)
g1 = geo+circles
joined_lats = get_energypc()
countries = alt.topo_feature(url, 'countries')
geo2 = alt.Chart(countries).mark_geoshape(
fill='lightblue',
stroke='white'
).properties(
width=580,
height=500
).project(
clipExtent= [[0, 0], [580, 360]]
)
color_range2 = alt.Color('EnergyPerCapita:Q',
scale=alt.Scale(
type='pow', exponent=0.6,
# domain=(temp_min, temp_max),
range=['yellow', 'orange', 'red']
)
)
circles2 = alt.Chart(joined_lats).mark_circle().encode(
latitude='Latitude:Q',
longitude="Longitude:Q",
size=alt.Size("EnergyPerCapita:Q", scale=alt.Scale(range=[0, 1000]), legend=None),
tooltip=['Entity', 'EnergyPerCapita:Q'],
color=alt.condition(selector, color_range2, alt.value('lightgreen'))
).add_selection(selector)
g2 = (geo2+circles2)
return (g1&g2).resolve_scale(color='independent'), selector
# return g2, selector
# @st.cache
def get_energypc_chart_heatmap():
url = 'https://cdn.jsdelivr.net/npm/world-atlas@2/countries-110m.json'
selector = alt.selection_single(name="selector", fields=['Entity'])
countries = alt.topo_feature(url, 'countries')
energy_sources = get_energy(with_coords=False)
energy_sources.loc[(energy_sources.Entity == 'United States'), ['Entity']] = 'United States of America'
color_range = alt.Color('TotalEnergy:Q',
scale=alt.Scale(
type='pow', exponent=0.6,
range=['lightblue', 'yellow', 'orange', 'red']
))
base_chart = alt.Chart(countries,
title='Net Energy Consumption by Country'
).mark_geoshape(stroke='white').encode(
color=alt.condition(selector, color_range,
alt.value('lightgray')
),
# color = color_range,
tooltip = [alt.Tooltip('properties.name:N', title='Country'), alt.Tooltip('TotalEnergy:Q', title='Total Energy Consumed (TWh)')]
).transform_lookup(
lookup='properties.name',
from_=alt.LookupData(energy_sources, 'Entity', ['TotalEnergy', 'Country', 'Entity'])
).properties(
width=580,
height=500
).add_selection(selector)
energy_pc = get_energypc(with_coords=False)
energy_pc.loc[(energy_pc.Entity == 'United States'), ['Entity']] = 'United States of America'
pc_chart = alt.Chart(countries,
title='Per Capita Energy Consumption by Country'
).mark_geoshape(stroke='white').encode(
color=alt.condition(selector, alt.Color('EnergyPerCapita:Q',
scale=alt.Scale(
type='pow', exponent=0.6,
range=['lightblue', 'yellow', 'orange', 'red']
)),
alt.value('lightgray')
),
tooltip = [alt.Tooltip('properties.name:N', title='Country'), alt.Tooltip('EnergyPerCapita:Q', title='Per Capita Energy Consumed (TWh/person)')]
).transform_lookup(
lookup='properties.name',
from_=alt.LookupData(energy_pc, 'Entity', ['EnergyPerCapita', 'Country', 'Entity'])
).properties(
width=580,
height=500
).add_selection(selector)
# return (base_chart|pc_chart).resolve_scale(color='independent'), selector
return base_chart, pc_chart, selector
# Credit: https://stackoverflow.com/questions/67997825/python-altair-generate-a-table-on-selection
def get_chart_table(data):
ranked_text = alt.Chart(data).mark_text(align='right').encode(
y=alt.Y('row_number:O',axis=None)
).transform_window(
row_number='row_number()'
).transform_filter(
'datum.row_number < 10'
)
country = ranked_text.encode(text='Entity:N', strokeWidth=alt.value(0)).properties(title=alt.TitleParams(text='Country', align='right'))
energy = ranked_text.encode(text='TotalEnergy:N', strokeWidth=alt.value(0)).properties(title=alt.TitleParams(text='Total Energy Consumption (TWh)', align='right'))
text = alt.hconcat(country, energy)
return text
"""
# Climate Change, Energy, and Global Responsibility
## Energy Consumption and CO2 Emissions
"""
"""
Energy production is the major driving force behind climate change. Energy production is driven by energy needs, which vary by country. The geomap below is overlaid with the net energy consumption of each country in the year 2019. It can be seen that countries like China, Russia, India, and the USA have a very high energy consumption rate.
However, net energy consumption does not tell the full story. Some countries have more habitable regions and might have more people living on the same net energy. Click on the `Show Per Capita Energy` checkbox to find the per capita energy consumption by country.
The worst offenders are very different if we look at it from the per capita energy perspective. Countries like UAE, USA, Saudi Arabia, and Iceland have a very high per capita energy consumption. However, it must be noted that these countries have a much higher standard of living when compared to countries like Algeria and India.
If we expect these countries to grow, their per capita energy needs will also grow, and so will the net CO2 emissions. Let's try to visualize the hypothetical scenario in which every country in the world has the same standard of living as the developed countries. Try clicking on one of the countries on the world map. Its per capita energy consumption and current fuel mix will be applied to all countries, and the resulting effects can be seen on the charts below.
"""
"""
geo_map1, geo_map2, selector = get_energypc_chart_heatmap()
per_capita = st.checkbox("Show Per Capita Energy")
co2_pc, co2_cols = get_fuelco2()
co2_pc.loc[(co2_pc.Entity == 'United States'), ['Entity']] = 'United States of America'
# co2_pc_world = co2_pc[co2_pc.Entity == 'World']
# net_world_co2 = co2_pc[co2_pc.Entity == 'World'].NetCO2_WORLD.max()
co2_pc = co2_pc[co2_pc.Entity != 'World']
net_world_co2 = co2_pc['NetCO2'].mean()
net_co2_max = co2_pc['NetCO2'].max() + 10000
fuel_co2_long_2 = co2_pc.melt(id_vars='Entity', value_vars=list(map(lambda x: x, co2_cols)))
co2_chart = alt.Chart(
fuel_co2_long_2,
title='Global CO2 Emissions by Fuel Type Based on Selected Country\'s Rate'
).mark_bar().encode(
x=alt.X('variable', title='Emissions Source'),
y=alt.Y('mean(value):Q', title='CO2 emissions (T)'),
tooltip = [alt.Tooltip('mean(value):Q', title='CO2 emissions (T)')]
).properties(
width=200,
height=200
).transform_filter(
selector
).add_selection(selector)
netCO2_Line = pd.DataFrame({
'CO2 emissions(T)': [net_world_co2],
'Red Bar indicates:': "Original Net CO2 Emissions"
})
netCO2_rule_chart = alt.Chart(netCO2_Line).mark_rule().encode(
y=alt.Y('CO2 emissions(T):Q', title='CO2 emissions(T)'),
color = alt.Color('Red Bar indicates:', scale = alt.Scale(range=['red'])),
tooltip = [alt.Tooltip('CO2 emissions(T):Q', title='Global CO2 emissions(T) (Original)')]
).properties(
width=200,
height=200
)
fuel_co2_long_full = co2_pc.melt(id_vars='Entity', value_vars=['NetCO2'])
co2_chart_full = alt.Chart(
fuel_co2_long_full,
title='Global CO2 Emissions Based on Selected Country\'s Rate'
).mark_bar().encode(
x=alt.X('variable', title='World'),
y=alt.Y('mean(value):Q', title='CO2 emissions(T)', scale=alt.Scale(domain=[0, net_co2_max])),
tooltip = [alt.Tooltip('mean(value):Q', title='CO2 emissions (T)')],
).properties(
width=200,
height=200
).transform_filter(
selector
).add_selection(selector) + netCO2_rule_chart
df_energy_mix = get_energy(with_coords=False)
df_energy_mix.loc[(df_energy_mix.Entity == 'United States'), ['Entity']] = 'United States of America'
cols = [col for col in df_energy_mix.columns if col not in ('Entity', 'Code', 'Year', 'TotalEnergy')]
df_melt = df_energy_mix.melt(id_vars='Entity', value_vars=cols)
energy_mix_chart = alt.Chart(
df_melt,
title="Energy Sources for the Selected Country"
).mark_arc().encode(
theta=alt.Theta(field="value", type="quantitative"),
color=alt.Color(field="variable", title="Fuel Type"),
tooltip = [alt.Tooltip('value:Q', title='Energy TWh'), alt.Tooltip('Entity', title='Country'), alt.Tooltip('variable', title='Fuel Type')],
).transform_filter(selector).add_selection(selector)
if per_capita:
"""### What if every country had the same consumption rate?
Click on a country to apply its consumption rate to the world.
"""
st.write(((geo_map1 | geo_map2).resolve_scale(color='independent') & (energy_mix_chart | co2_chart | co2_chart_full).resolve_scale(color='independent')).resolve_scale(color='independent'))
else:
table_df = df_energy_mix[['Entity', 'TotalEnergy']].sort_values(['TotalEnergy'], ascending=False)
table_df = table_df[~table_df.Entity.isin(('World', 'Asia Pacific', 'OECD', 'Non-OECD', 'European Union', 'Middle East', 'Africa', 'South & Central America', 'North America', 'Europe', 'CIS'))]
st.write((geo_map1 | get_chart_table(table_df)).resolve_scale(
color="independent").configure_view(strokeWidth=0))
"""
### Predicting energy production and demand based on current growth rate
"""
def load_data():
energy_data = pd.read_csv("./data/owid-energy-data.csv")
return energy_data.sort_index()
energy_data = load_data()
all_countries = list(set(energy_data["country"]))
all_countries.sort()
option = st.selectbox(
'Select a Region:',
all_countries)
energy_ukr = energy_data[energy_data['country']==option]
energy_ukr = energy_ukr.dropna(subset = ['electricity_demand'])
try:
x_years = list(energy_ukr['year']) + list(range(2023,2030))
# st.write("Electricity Demand Historic Data and Prediction")
X = energy_ukr['year'].values[:,np.newaxis]
y_demand= energy_ukr['electricity_demand']
model = LinearRegression()
model.fit(X, y_demand)
#st.write(model.predict(x_years))
object_for_visualization = {'Years':x_years, "Power(in Terawatt-hour)": list(y_demand) + list(model.predict(np.asarray(x_years[-7:]).reshape(-1,1)))}
viz_df = pd.DataFrame(object_for_visualization)
    viz_df['date'] = pd.to_datetime(viz_df['Years'])
from collections import namedtuple
import numpy as np
import pandas as pd
import random
from scipy.special import gammaln
from scipy.integrate import solve_ivp
from scipy.optimize import minimize
from scipy.linalg import expm
from tqdm import tqdm
from matplotlib import pyplot as plt
from tqdm import tqdm
from eda import us_data
from mass_pop_data import ma_county_pops
from tx_pop_data import tx_county_pops
from nyt_data import county_data
# T = len(us_data['confirmed'])
np.set_printoptions(precision=3)
log = np.log
exp = np.exp
#N = US_POP = 327 * 10**6
underreporting_factors = np.linspace(1, 10, 1000)
doubling_times = np.linspace(2, 7, 1000)
VAR_NAMES = ['s', 'i', 'c', 'ru', 'rc', 'd']
SEIR_VAR_NAMES = ['s', 'e', 'i', 'r', 'd']
SEIR_PARAM_NAMES = ['beta', 'sigma', 'gamma', 'mu', 'I0']
PARAM_NAMES = ['beta', 'delta', 'gamma_u', 'gamma_c', 'mu']
# Param assumptions
incubation_period = 14
recovery_period = 21
fatality_rate = 0.02
R0 = 2.2
iota = 1 / incubation_period
rho = 1 / recovery_period
delta = rho * (fatality_rate) / (1 - fatality_rate)
epsilon = R0 * (rho + delta)
def log_fac(x):
return gammaln(x + 1)
def sir_deriv(arr, params):
assert(np.isclose(np.sum(arr), 1))
s, i, c, ru, rc, d = arr
beta, delta, gamma_u, gamma_c, mu = params
ds = - beta * s * i
di = beta * s * i - gamma_u * i - delta * i
dc = delta * i - (mu + gamma_c) * c
dru = gamma_u * i
drc = gamma_c * c
dd = mu * c
darr = np.array([ds, di, dc, dru, drc, dd])
assert(np.isclose(np.sum(darr), 0))
return darr
def seir_deriv(x, params):
assert(np.isclose(np.sum(x), 1))
s, e, i, r, d = x
beta, sigma, gamma, mu = params
ds = -beta * s * i
de = beta * s * i - sigma * e
di = sigma * e - (gamma + mu) * i
dr = gamma * i
dd = mu * i
dx = np.array([ds, de, di, dr, dd])
assert(np.isclose(np.sum(dx), 0))
return dx
def solve_sir(x0, params, end_time):
f = lambda t, x: sir_deriv(x, params)
assert(np.isclose(sum(x0), 1))
t0 = 0
tf = end_time
t_span = (t0, tf)
sol = solve_ivp(f, t_span, x0, max_step=1, t_eval=range(tf))
return sol
def solve_seir(x0, params, end_time):
f = lambda t, x: seir_deriv(x, params)
assert(np.isclose(sum(x0), 1))
t0 = 0
tf = end_time
t_span = (t0, tf)
sol = solve_ivp(f, t_span, x0, max_step=1, t_eval=range(tf))
return sol
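def demo_solve_seir(days=180):
    # Illustrative helper (not part of the original analysis): solve the SEIR
    # system with the module-level parameter assumptions above and one seed
    # infection per million people. Parameter order is (beta, sigma, gamma, mu);
    # using epsilon as beta here is an assumption for demonstration only.
    params = np.array([epsilon, iota, rho, delta])
    I0 = 1e-6
    x0 = np.array([1 - I0, 0.0, I0, 0.0, 0.0])
    return solve_seir(x0, params, days)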
def init_approximation(params):
beta, delta, gamma_u, gamma_c, mu = params
ALPHA = beta - (delta + gamma_u)
ETA = gamma_c + mu
coeff = delta * I0/(ALPHA + ETA)
Kc = -coeff # c should be zero at t=0
def c(t):
return coeff * exp(ALPHA * t) + Kc*exp(-ETA*t)
def z(t):
return coeff / ALPHA * exp(ALPHA * t) - Kc / ETA * exp(-ETA*t)
Kz = -mu * z(0)
def d(t):
return mu * z(t) + Kz
Kru = -gamma_c * z(0)
def rc(t):
return gamma_c * z(t) + Kru
return c, rc, d
def bound(x, N):
return np.clip(x, 1/N, 1 - 1/N)
def init_approximation_sse(log_params, data):
M = 10
N = data['pop']
T = len(data['confirmed'])
params = exp(log_params)
ts = np.arange(T)
_c, _rc, _d = init_approximation(params)
    c = bound(_c(ts), N)[:-2] + 1/N
    d = bound(_d(ts), N)[:-2] + 1/N
    rc = bound(_rc(ts), N)[:-2] + 1/N
    trash = bound(1 - (c + d + rc), N)
    obs_c = data['confirmed'][:-2]
    obs_d = data['deaths'][:-2]
    obs_rc = data['recovered']
obs_trash = N - (obs_c + obs_d + obs_rc)
prefactor = log_fac(N) - (log_fac(obs_c) + log_fac(obs_d) + log_fac(obs_rc) + log_fac(obs_trash))
#return sum(((log(c(ts) + 1/N) - log(obs_c + 1/N)))**2) + sum(((log(d(ts) + 1/N) - log(obs_d + 1/N)))**2) + sum((log(rc(ts)[:-2] + 1/N) - log(obs_rc + 1/N))**2)
return sum(prefactor + obs_c * log(c) + obs_d * log(d) + obs_rc * log(rc) + obs_trash * log(trash))
def q(x, sigma=0.01):
"""for use with log params"""
return x + np.random.normal(0, sigma, size=len(x))
def mh(lf, q, x, iterations=10000, modulus=100):
traj = []
ll = lf(x)
accepts = 0
for iteration in range(iterations):
xp = q(x)
llp = lf(xp)
if log(random.random()) < llp - ll:
x = xp
ll = llp
accepts += 1
if iteration % modulus == 0:
traj.append((x, ll))
print(
"{}/{} log_params: {} log-likelihood: {:1.3f} acceptances: {} acceptance ratio: {:1.3f}".format(
iteration, iterations, x, ll, accepts, accepts / (iteration + 1)
)
)
return traj
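def demo_mh_standard_normal(iterations=1000):
    # Toy usage of the mh() sampler above (illustrative only): draw from a 2-D
    # standard normal, whose log-density is -0.5 * |x|^2 up to an additive
    # constant, using the default Gaussian proposal q().
    lf = lambda x: -0.5 * np.sum(x ** 2)
    x0 = np.zeros(2)
    return mh(lf, q, x0, iterations=iterations, modulus=100)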
def fit_init_approximation(tol=10**-14):
x0 = np.random.normal(0, 1, size=len(PARAM_NAMES))
# x0 = np.array([ 13.26726095, -7.21161112, 13.26726049, -6.55617211,
# -52.65910809])
return minimize(init_approximation_sse, x0, method='powell', options={'maxiter': 100000, 'xtol':tol, 'disp':True})
def check_init_approxiation_fit(tol):
sol = fit_init_approximation(tol)
def plot_log_params(log_params, data, plot_data=True, plot_legend=True, show=True):
params = exp(log_params)
N = data['pop']
T = len(data['confirmed'])
c, rc, d = init_approximation(params)
obs_c = data['confirmed'] / N
obs_d = data['deaths'] / N
obs_rc = data['recovered'] / N
ts = np.arange(T)
if plot_data:
plt.plot(obs_c, linestyle=' ', marker='o', label='obs c')
plt.plot(obs_d, linestyle=' ', marker='o', label='obs d')
plt.plot(obs_rc, linestyle=' ', marker='o', label='obs rc')
plt.plot(c(ts), label='est c', color='b', linestyle='--')
plt.plot(d(ts), label='est d', color='orange', linestyle='--')
plt.plot(rc(ts), label='est rc', color='g', linestyle='--')
if plot_legend:
plt.legend()
if show:
plt.show()
def test_init_approximation(data):
# VAR_NAMES = ['s', 'i', 'c', 'ru', 'rc', 'd']
N = data['pop']
I0 = 1/N
ic = [1-I0, I0, 0, 0, 0, 0]
params = np.array([ 0.82, 0.22, 0.34, 2.30, 10.28]) * 3
    sol = solve_sir(ic, params, len(data['confirmed']))
def estimate_init_conds():
confirmed_cases = 13
underreporting_factor = 10
initial_cases = confirmed_cases * underreporting_factor
susceptible_cases = boston_pop - initial_cases
infected_cases = initial_cases / 3
exposed_cases = initial_cases - infected_cases
s = susceptible_cases / boston_pop
e = exposed_cases / boston_pop
i = infected_cases / boston_pop
d = 0
r = 0
def plot_sir_sol(sol):
ts = sol.t
c = sol.y[VAR_NAMES.index('c'), :]
i = sol.y[VAR_NAMES.index('i'), :]
y = c + i
y0, yf = y[0], y[10]
t0, tf = ts[0], ts[10]
doublings = np.log2(yf / y0)
doubling_time = (tf - t0) / doublings
print("doubling time:", doubling_time)
    for i, var_name in enumerate(VAR_NAMES):
plt.plot(sol.y[i, :], label=var_name)
plt.legend()
plt.show()
def log_likelihood(sol, data):
obs_c = data['confirmed']
obs_rc = data['recovered']
obs_d = data['deaths']
N = data['pop']
T = len(data['confirmed'])
y_c = sol.y[VAR_NAMES.index('c'), :]
#y_rc = sol.y[VAR_NAMES.index('rc'), :]
y_d = sol.y[VAR_NAMES.index('d'), :]
y_trash = 1 - (y_c + y_d)
log_prob = 0
for t in range(T):
#print(t)
C, D = obs_c[t], obs_d[t]
TRASH = N - (C + D)
c, d, trash = y_c[t], y_d[t], y_trash[t]
prefactor = log_fac(N) - (log_fac(C) + log_fac(D) + log_fac(TRASH))
#print(c, rc, d)
log_prob_t = prefactor + C * log(c) + D * log(d) + TRASH * log(trash)
#print(prefactor, log_prob_t)
log_prob += log_prob_t
return log_prob
def log_likelihood2(sol, data):
obs_c = data['confirmed']
obs_rc = data['recovered']
obs_d = data['deaths']
N = data['pop']
T = len(data['confirmed'])
y_c = sol.y[VAR_NAMES.index('c'), :]
y_rc = sol.y[VAR_NAMES.index('rc'), :]
y_d = sol.y[VAR_NAMES.index('d'), :]
y_trash = 1 - (y_c + y_rc + y_d)
log_prob = 0
for t in range(T):
#print(t)
C, RC, D = obs_c[t], obs_rc[t], obs_d[t]
TRASH = N - (C + RC + D)
c, rc, d, trash = y_c[t], y_rc[t], y_d[t], y_trash[t]
#print(c, rc, d)
log_prob_t = -((C - c*N)**2 + (RC - rc*N)**2 + (D - (d*N))**2 + (TRASH - trash*N)**2)
#print(prefactor, log_prob_t)
log_prob += log_prob_t
return log_prob
def seir_log_likelihood(sol, data, only_deaths=True):
obs_c = data['confirmed']
obs_d = data['deaths']
N = data['pop']
T = len(data['confirmed'])
y_c = bound(sol.y[SEIR_VAR_NAMES.index('i'), :], N)
y_d = bound(sol.y[SEIR_VAR_NAMES.index('d'), :], N)
if only_deaths:
y_trash = 1 - (y_d)
else:
y_trash = 1 - (y_c + y_d)
log_prob = 0
for t in range(T):
#print(t)
# if obs_c[t] < 100:
# continue
if only_deaths:
D = obs_d[t]
TRASH = N - D
d, trash = y_d[t], y_trash[t]
log_prob += multinomial_ll([d, trash], [D, TRASH])
else:
C, D = obs_c[t], obs_d[t]
TRASH = N - (C + D)
c, d, trash = y_c[t], y_d[t], y_trash[t]
log_prob += multinomial_ll([c, d, trash], [C, D, TRASH])
# log_prob += sse_ll([c, d, trash], [C, D, TRASH])
return log_prob
def multinomial_ll(ps, obs):
N = np.sum(obs)
prefactor = log_fac(N) - sum(log_fac(n) for n in obs)
return prefactor + sum(o * log(p) for (p, o) in zip(ps, obs))
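# Worked example: multinomial_ll([0.5, 0.5], [3, 1])
#   = log(4!/(3!*1!)) + 3*log(0.5) + 1*log(0.5) = log(4) - 4*log(2) ~= -1.386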
def sse_ll(ps, obs):
N = sum(obs)
return -sum((p * N - o)**2 for (p, o) in zip(ps, obs))
def random_hyp():
ic = np.array([0.99] + [random.random() * 0.01 for _ in range(len(VAR_NAMES) - 1)])
ic = ic / sum(ic)
log_thetas = np.random.normal(0, 1, size=len(PARAM_NAMES))
thetas = exp(log_thetas)
thetas[5:] /= 10
return ic, thetas
def mutate_hyp(hyp):
ic, thetas = hyp
log_ic = log(ic)
new_log_ic = log_ic + np.random.normal(0, 0.01, size=len(ic))
new_ic = exp(new_log_ic)
new_ic /= sum(new_ic)
log_thetas = log(thetas)
new_log_thetas = log_thetas + np.random.normal(0, 0.01, size=len(thetas))
new_thetas = exp(new_log_thetas)
return new_ic, new_thetas
def ll_from_hyp(hyp, data):
ic, thetas = hyp
T = len(data['confirmed'])
sol = solve_sir(ic, thetas, T)
return log_likelihood(sol, data)
def fit_model(data, generations=10000):
ll = None
traj = []
acceptances = 0
while ll is None:
hyp = random_hyp()
print(hyp)
prop_ll = ll_from_hyp(hyp, data)
if not np.isnan(prop_ll):
ll = prop_ll
for t in range(generations):
hyp_p = mutate_hyp(hyp)
ll_p = ll_from_hyp(hyp_p, data)
if np.log(random.random()) < ll_p - ll:
acceptances += 1
hyp = hyp_p
ll = ll_p
if t % 100 == 0:
traj.append((hyp, ll))
print(t, ll, "ar:", acceptances / (t + 1))
print(hyp)
return traj
def ps_from_lls(lls):
print("min, max:", min(lls), max(lls))
a = min(lls)
expa = exp(a)
ps = [exp(ll - a) for ll in lls]
return ps
def check_hyp(hyp, data):
N = data['pop']
T = len(data['confirmed'])
x0, params = hyp
sol = solve_sir(x0, params, T)
for name, ts in zip(VAR_NAMES, sol.y):
plt.plot(ts, label=name)
plt.plot(data['confirmed'] / N, label='obs confirmed', marker='o', linestyle=' ')
plt.plot(data['recovered'] / N, label='obs recovered', marker='o', linestyle=' ')
plt.plot(data['deaths'] / N, label='obs deaths', marker='o', linestyle=' ')
plt.legend()
def plot_lls(traj):
lls = [ll for (x, ll) in traj]
plt.subplot(2, 1, 1)
plt.plot(lls)
plt.xlabel("Iterations x 100", size='x-large')
plt.ylabel("Log-likelihood", size='x-large')
plt.subplot(2, 1, 2)
plt.plot(lls)
plt.ylim(-760, -730)
plt.xlabel("Iterations x 100", size='x-large')
plt.ylabel("Log-likelihood", size='x-large')
plt.tight_layout()
plt.savefig("ll-plot.png", dpi=300)
def plot_param_results(traj, data):
"""Use with SIR"""
N = data['pop']
T = len(data['confirmed'])
log_params, ll = traj[-1]
params = exp(log_params)
# VAR_NAMES = ['s', 'i', 'c', 'ru', 'rc', 'd']
params = exp(log_params)
c, rc, d = init_approximation(params)
sir_x0 = np.array([1-1/N, 1/N, 0, 0, 0, 0])
    sir_sol = solve_sir(sir_x0, params, T)
sir_c, sir_rc, sir_d = sir_sol.y[2], sir_sol.y[4], sir_sol.y[5]
obs_c = data['confirmed'] / N
obs_d = data['deaths'] / N
obs_rc = data['recovered'] / N
ts = np.arange(T)
plt.subplot(3, 1, 1)
plt.plot(obs_c, linestyle=' ', marker='o', label='C (observed)')
plt.plot(sir_c, color='blue', label='C (SIR model)')
plt.plot(c(ts), color='orange', linestyle='--', label='C (init approx)')
plt.legend()
plt.subplot(3, 1, 2)
plt.plot(obs_rc, linestyle=' ', marker='o', label='Rc (observed)')
plt.plot(sir_rc, color='blue', label='Rc (SIR model)')
plt.plot(rc(ts), color='orange', linestyle='--', label='Rc (init approx)')
plt.ylabel("Population Fraction", size='x-large')
plt.legend()
plt.subplot(3, 1, 3)
plt.plot(obs_d, linestyle=' ', marker='o', label='D (observed)')
plt.plot(sir_d, color='blue', label='D (SIR model)')
plt.plot(d(ts), color='orange', linestyle='--', label='D (init approx)')
plt.legend()
plt.xlabel("Days since 1/22/20", size='x-large')
plt.tight_layout()
plt.savefig("fit-results.png", dpi=300)
plt.close()
def log_param_scatterplot(log_param_traj, param_names=["beta", "sigma", "gamma", "mu", "IO"]):
# param_traj = [exp(lp) for lp in log_param_traj]
K = len(log_param_traj[0])
log_param_vecs = list(zip(*log_param_traj))
for i, i_param in enumerate(param_names):
for j, j_param in enumerate(param_names):
plt_idx = j * K + i + 1
print(i_param, j_param)
plt.subplot(K, K, plt_idx)
if plt_idx % K == 1:
plt.ylabel(j_param)
if j_param == param_names[-1]:
plt.xlabel(i_param)
print("x label:", i_param)
#plt.title(i_param + " " + j_param)
if i == j:
plt.hist(log_param_vecs[i])
else:
plt.scatter(log_param_vecs[i], log_param_vecs[j], s=5)
# plt.tight_layout()
# plt.savefig("param-pairplots.png", dpi=300)
    # plt.close()
def seir_experiment(data, log_params=None, iterations=10_000, sigma=0.01, only_deaths=True):
# S, E, I, R, D
T = len(data['confirmed'])
if log_params is None:
log_params = np.array([-0.19780107, -2.65762238, -3.21675428, -6.12722099, -19.6])
log_params = np.random.normal(-2, 1, size=len(log_params))
N = data['pop']
I0 = 1/N
log_params[-1] = log(I0) # seed I0 as 1 / US_POP
def lf(log_params):
params = exp(log_params)
params, I0 = params[:-1], params[-1]
init_condition = np.array([1 -I0, 0, I0, 0, 0])
sol = solve_seir(init_condition, params, T)
return seir_log_likelihood(sol, data, only_deaths=only_deaths)
traj = mh(lf, lambda x:q(x, sigma=sigma), log_params, modulus=10, iterations=iterations)
return traj
# log_params1 = traj1[-1][0]
# traj01 = mh(lf, lambda x:q(x, sigma=0.01), log_params1, modulus=10, iterations=1000)
# log_params01 = traj01[-1][0]
# traj001 = mh(lf, lambda x:q(x, sigma=0.01), log_params01, modulus=10, iterations=1000)
# log_params001 = traj001[-1][0]
# traj0001 = mh(lf, lambda x:q(x, sigma=0.001), log_params001, modulus=10, iterations=1000)
# return traj1 + traj01 + traj--1 + traj0001
def plot_seir_param_results(traj, data, fname=None):
log_params, ll = traj[-1]
T = len(data['confirmed'])
params = exp(log_params)
# SEIRD
params, I0 = params[:-1], params[-1]
init_condition = np.array([1 -I0, 0, I0, 0, 0])
seir_sol = solve_seir(init_condition, params, T)
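    # The unpacking below assumes SEIR_VAR_NAMES orders the compartments as
    # [s, e, i, r, d], i.e. rows 2/3/4 of sol.y are infected, recovered, dead.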
seir_c, seir_r, seir_d = seir_sol.y[2], seir_sol.y[3], seir_sol.y[4]
    N = data['pop']
obs_c = data['confirmed']
obs_d = data['deaths']
#obs_rc = data['recovered'] / N
ts = np.arange(T)
approx_f = seir_approximation(init_condition, params)
approx_c = np.array([approx_f(t)[SEIR_VAR_NAMES.index('i')] for t in ts])
#approx_r = [approx_f(t)[SEIR_VAR_NAMES.index('r')] for t in ts]
approx_d = np.array([approx_f(t)[SEIR_VAR_NAMES.index('d')] for t in ts])
plt.subplot(2, 1, 1)
plt.plot(obs_c, linestyle=' ', marker='o', label='C (observed)')
plt.plot(seir_c * N, color='blue', label='C (SEIR model)')
plt.plot(approx_c * N, color='orange', label='C (approx)', linestyle='--')
# for log_params, ll in traj[::10]:
# params = exp(log_params)
# params, I0 = params[:-1], params[-1]
# init_condition = np.array([1 -I0, 0, I0, 0, 0])
# seir_sol = solve_seir(init_condition, params)
# seir_c, seir_r, seir_d = seir_sol.y[2], seir_sol.y[3], seir_sol.y[4]
# plt.plot(seir_c, color='blue', alpha=0.01)
plt.legend()
# plt.subplot(3, 1, 2)
# plt.plot(obs_rc, linestyle=' ', marker='o', label='Rc (observed)')
# plt.plot(seir_r, color='blue', label='Rc (SEIR model)')
# plt.plot(approx_r, color='orange', label='Rc (approx)', linestyle='--')
# plt.ylabel("Population Fraction", size='x-large')
# plt.legend()
plt.subplot(2, 1, 2)
plt.plot(obs_d, linestyle=' ', marker='o', label='D (observed)')
plt.plot(seir_d * N, color='blue', label='D (SEIR model)')
plt.plot(approx_d * N, color='orange', label='D (approx)', linestyle='--')
plt.legend()
plt.xlabel("Days since 1/22/20", size='x-large')
if fname:
plt.tight_layout()
plt.savefig("fit-results.png", dpi=300)
plt.close()
else:
plt.show()
def plot_seir_sol(sol):
start = 'Jan 22, 2020'
today = pd.to_datetime('now')
date_range = pd.date_range(start=start, end=today)
today_t = len(date_range)
for var_name, data in zip(SEIR_VAR_NAMES, sol.y):
plt.plot(data, label=var_name)
plt.axvline(today_t, linestyle='--', label='today')
plt.legend()
plt.xlabel("Days since 1/22/2020")
plt.show()
def plot_seir_sols_from_traj(traj, data):
N = data['pop']
colors = 'brygc'
for i, (log_params, ll) in tqdm(enumerate(traj)):
params = exp(log_params)
params, I0 = params[:-1], params[-1]
init_condition = np.array([1 -I0, 0, I0, 0, 0])
sol = solve_seir(init_condition, params, 365*2)
for var_name, time_series, color in zip(SEIR_VAR_NAMES, sol.y, colors):
plt.plot(
time_series * N,
label=(var_name if i == 0 else None),
color=color,
alpha=0.5
)
plt.plot(data['confirmed'], marker='o', linestyle=' ', label='obs C')
plt.plot(data['deaths'], marker='o', linestyle=' ', label='obs D')
start = 'Jan 22, 2020'
today = pd.to_datetime('now')
date_range = pd.date_range(start=start, end=today)
today_t = len(date_range)
plt.axvline(today_t, linestyle='--', label='today')
plt.legend()
plt.xlabel("Days since 1/22/2020")
plt.show()
def seir_approximation(y0, params):
beta, sigma, gamma, mu = params
Gamma = gamma + mu
A = np.matrix([
[0, 0, -beta, 0, 0],
[0, -sigma, beta, 0, 0],
        [0, sigma, -Gamma, 0, 0],
[0, 0, gamma, 0, 0],
[0, 0, mu, 0, 0],
])
return lambda t: expm(A*t).dot(y0)
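# Note: A above is the SEIR system linearised about S ~ 1, so expm(A*t) @ y0
# only approximates solve_seir while the susceptible fraction stays near 1
# (early epidemic). Illustrative values only, not fitted ones:
#
#   params = np.array([0.5, 0.2, 0.1, 0.01])  # beta, sigma, gamma, mu
#   I0 = 1e-6
#   y0 = np.array([1 - I0, 0, I0, 0, 0])
#   f = seir_approximation(y0, params)
#   f(5)  # early-time approximation of the corresponding solve_seir solution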
def make_csv_from_ma_traj(traj, data, fname):
N = data['pop']
T = 365
cases = [[] for _ in range(T)]
deaths = [[] for _ in range(T)]
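    # For every posterior sample in `traj`, integrate the SEIR model forward
    # and collect the projected case/death counts day by day.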
for log_tunables, ll in tqdm(traj):
log_params, log_I0 = log_tunables[:-1], log_tunables[-1]
params, I0 = exp(log_params), exp(log_I0)
x0 = np.array([1 -I0, 0, I0, 0, 0])
sol = solve_seir(x0, params, end_time=T)
cases_idx = SEIR_VAR_NAMES.index('i')
deaths_idx = SEIR_VAR_NAMES.index('d')
num_cases = sol.y[cases_idx, :] * N
num_deaths = sol.y[deaths_idx, :] * N
for i in range(T):
cases[i].append(num_cases[i])
deaths[i].append(num_deaths[i])
# cases = [sorted(col) for col in cases]
# deaths = [sorted(col) for col in deaths]
cases_mean = [np.mean(col) for col in cases]
cases_2p5 = [np.percentile(col, 2.5) for col in cases]
cases_97p5 = [np.percentile(col, 97.5) for col in cases]
deaths_mean = [np.mean(col) for col in deaths]
deaths_2p5 = [np.percentile(col, 2.5) for col in deaths]
deaths_97p5 = [np.percentile(col, 97.5) for col in deaths]
start = 'Jan 22, 2020'
end = pd.to_datetime(start) + pd.Timedelta(days=(365 - 1))
date_range = pd.date_range(start=start, end=end)
data_dict = {
'Date': date_range,
        'Cases_Mean': round_to_int(cases_mean),
        'Cases_LB': round_to_int(cases_2p5),
        'Cases_UB': round_to_int(cases_97p5),
        'Deaths_Mean': round_to_int(deaths_mean),
        'Deaths_LB': round_to_int(deaths_2p5),
        'Deaths_UB': round_to_int(deaths_97p5),
}
data_cols = ['Cases_Mean', 'Cases_LB', 'Cases_UB', 'Deaths_Mean', 'Deaths_LB', 'Deaths_UB']
for county, county_pop in sorted(ma_county_pops.items()):
county_frac = county_pop / N
for col_name in data_cols:
col = data_dict[col_name]
county_col = round_to_int(col * county_frac)
county_col_name = county + "_" + col_name
data_dict[county_col_name] = county_col
    df = pd.DataFrame(data_dict)
    df.to_csv(fname, index=False)  # presumably the intended output, given the `fname` argument
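# Sketch usage (hedged: `traj` is a fitted trajectory from seir_experiment and
# `ma_data` is the Massachusetts-level data dict built elsewhere):
#
#   make_csv_from_ma_traj(traj, ma_data, "ma-projections.csv")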
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import decimal
import json
import multiprocessing as mp
from collections import OrderedDict
from datetime import date, datetime, time, timedelta
import numpy as np
import numpy.testing as npt
import pandas as pd
import pandas.util.testing as tm
import pytest
import pyarrow as pa
import pyarrow.types as patypes
from pyarrow.compat import PY2
from .pandas_examples import dataframe_with_arrays, dataframe_with_lists
def _alltypes_example(size=100):
return pd.DataFrame({
'uint8': np.arange(size, dtype=np.uint8),
'uint16': np.arange(size, dtype=np.uint16),
'uint32': np.arange(size, dtype=np.uint32),
'uint64': np.arange(size, dtype=np.uint64),
        'int8': np.arange(size, dtype=np.int8),
'int16': np.arange(size, dtype=np.int16),
'int32': np.arange(size, dtype=np.int32),
'int64': np.arange(size, dtype=np.int64),
'float32': np.arange(size, dtype=np.float32),
'float64': np.arange(size, dtype=np.float64),
'bool': np.random.randn(size) > 0,
        # TODO(wesm): Pandas only supports ns resolution, Arrow supports s, ms,
        # us, ns
'datetime': np.arange("2016-01-01T00:00:00.001", size,
dtype='datetime64[ms]'),
'str': [str(x) for x in range(size)],
'str_with_nulls': [None] + [str(x) for x in range(size - 2)] + [None],
'empty_str': [''] * size
})
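# _alltypes_example builds one column per primitive pandas dtype exercised by
# these tests; it is reused by the threaded/non-threaded conversion helpers
# further down in this file.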
def _check_pandas_roundtrip(df, expected=None, use_threads=True,
expected_schema=None,
check_dtype=True, schema=None,
preserve_index=False,
as_batch=False):
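    # Convert `df` to an Arrow Table/RecordBatch, convert back to pandas, and
    # compare the result with `expected` (defaults to `df` itself).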
klass = pa.RecordBatch if as_batch else pa.Table
table = klass.from_pandas(df, schema=schema,
preserve_index=preserve_index,
nthreads=2 if use_threads else 1)
result = table.to_pandas(use_threads=use_threads)
if expected_schema:
        # all occurrences of _check_pandas_roundtrip pass expected_schema
# without the pandas generated key-value metadata, so we need to
# add it before checking schema equality
expected_schema = expected_schema.add_metadata(table.schema.metadata)
assert table.schema.equals(expected_schema)
if expected is None:
expected = df
tm.assert_frame_equal(result, expected, check_dtype=check_dtype,
check_index_type=('equiv' if preserve_index
else False))
def _check_series_roundtrip(s, type_=None, expected_pa_type=None):
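    # Round-trip a pandas Series through pa.array / to_pandas and check both
    # the resulting Arrow type and value equality.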
arr = pa.array(s, from_pandas=True, type=type_)
if type_ is not None and expected_pa_type is None:
expected_pa_type = type_
if expected_pa_type is not None:
assert arr.type == expected_pa_type
result = pd.Series(arr.to_pandas(), name=s.name)
if patypes.is_timestamp(arr.type) and arr.type.tz is not None:
result = (result.dt.tz_localize('utc')
.dt.tz_convert(arr.type.tz))
tm.assert_series_equal(s, result)
def _check_array_roundtrip(values, expected=None, mask=None,
type=None):
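    # Round-trip raw values through pa.array, checking the null count and
    # (optionally masked) value equality against the inputs.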
arr = pa.array(values, from_pandas=True, mask=mask, type=type)
result = arr.to_pandas()
values_nulls = pd.isnull(values)
if mask is None:
assert arr.null_count == values_nulls.sum()
else:
assert arr.null_count == (mask | values_nulls).sum()
if mask is None:
tm.assert_series_equal(pd.Series(result), pd.Series(values),
check_names=False)
else:
expected = pd.Series(np.ma.masked_array(values, mask=mask))
tm.assert_series_equal(pd.Series(result), expected,
check_names=False)
def _check_array_from_pandas_roundtrip(np_array, type=None):
arr = pa.array(np_array, from_pandas=True, type=type)
result = arr.to_pandas()
npt.assert_array_equal(result, np_array)
class TestConvertMetadata(object):
"""
Conversion tests for Pandas metadata & indices.
"""
def test_non_string_columns(self):
df = pd.DataFrame({0: [1, 2, 3]})
table = pa.Table.from_pandas(df)
assert table.column(0).name == '0'
def test_from_pandas_with_columns(self):
df = pd.DataFrame({0: [1, 2, 3], 1: [1, 3, 3], 2: [2, 4, 5]})
table = pa.Table.from_pandas(df, columns=[0, 1])
expected = pa.Table.from_pandas(df[[0, 1]])
assert expected.equals(table)
record_batch_table = pa.RecordBatch.from_pandas(df, columns=[0, 1])
record_batch_expected = pa.RecordBatch.from_pandas(df[[0, 1]])
assert record_batch_expected.equals(record_batch_table)
def test_column_index_names_are_preserved(self):
df = pd.DataFrame({'data': [1, 2, 3]})
df.columns.names = ['a']
_check_pandas_roundtrip(df, preserve_index=True)
def test_multiindex_columns(self):
columns = pd.MultiIndex.from_arrays([
['one', 'two'], ['X', 'Y']
])
df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')], columns=columns)
_check_pandas_roundtrip(df, preserve_index=True)
def test_multiindex_columns_with_dtypes(self):
columns = pd.MultiIndex.from_arrays(
[
['one', 'two'],
pd.DatetimeIndex(['2017-08-01', '2017-08-02']),
],
names=['level_1', 'level_2'],
)
df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')], columns=columns)
_check_pandas_roundtrip(df, preserve_index=True)
def test_multiindex_columns_unicode(self):
columns = pd.MultiIndex.from_arrays([[u'あ', u'い'], ['X', 'Y']])
df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')], columns=columns)
_check_pandas_roundtrip(df, preserve_index=True)
def test_integer_index_column(self):
df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')])
_check_pandas_roundtrip(df, preserve_index=True)
def test_index_metadata_field_name(self):
# test None case, and strangely named non-index columns
df = pd.DataFrame(
[(1, 'a', 3.1), (2, 'b', 2.2), (3, 'c', 1.3)],
index=pd.MultiIndex.from_arrays(
[['c', 'b', 'a'], [3, 2, 1]],
names=[None, 'foo']
),
columns=['a', None, '__index_level_0__'],
)
t = pa.Table.from_pandas(df, preserve_index=True)
raw_metadata = t.schema.metadata
js = json.loads(raw_metadata[b'pandas'].decode('utf8'))
col1, col2, col3, idx0, foo = js['columns']
assert col1['name'] == 'a'
assert col1['name'] == col1['field_name']
assert col2['name'] is None
assert col2['field_name'] == 'None'
assert col3['name'] == '__index_level_0__'
assert col3['name'] == col3['field_name']
idx0_name, foo_name = js['index_columns']
assert idx0_name == '__index_level_0__'
assert idx0['field_name'] == idx0_name
assert idx0['name'] is None
assert foo_name == 'foo'
assert foo['field_name'] == foo_name
assert foo['name'] == foo_name
def test_categorical_column_index(self):
df = pd.DataFrame(
[(1, 'a', 2.0), (2, 'b', 3.0), (3, 'c', 4.0)],
columns=pd.Index(list('def'), dtype='category')
)
t = pa.Table.from_pandas(df, preserve_index=True)
raw_metadata = t.schema.metadata
js = json.loads(raw_metadata[b'pandas'].decode('utf8'))
column_indexes, = js['column_indexes']
assert column_indexes['name'] is None
assert column_indexes['pandas_type'] == 'categorical'
assert column_indexes['numpy_type'] == 'int8'
md = column_indexes['metadata']
assert md['num_categories'] == 3
assert md['ordered'] is False
def test_string_column_index(self):
df = pd.DataFrame(
[(1, 'a', 2.0), (2, 'b', 3.0), (3, 'c', 4.0)],
columns=pd.Index(list('def'), name='stringz')
)
t = pa.Table.from_pandas(df, preserve_index=True)
raw_metadata = t.schema.metadata
js = json.loads(raw_metadata[b'pandas'].decode('utf8'))
column_indexes, = js['column_indexes']
assert column_indexes['name'] == 'stringz'
assert column_indexes['name'] == column_indexes['field_name']
assert column_indexes['pandas_type'] == ('bytes' if PY2 else 'unicode')
assert column_indexes['numpy_type'] == 'object'
md = column_indexes['metadata']
if not PY2:
assert len(md) == 1
assert md['encoding'] == 'UTF-8'
else:
assert md is None or 'encoding' not in md
def test_datetimetz_column_index(self):
df = pd.DataFrame(
[(1, 'a', 2.0), (2, 'b', 3.0), (3, 'c', 4.0)],
columns=pd.date_range(
start='2017-01-01', periods=3, tz='America/New_York'
)
)
t = pa.Table.from_pandas(df, preserve_index=True)
raw_metadata = t.schema.metadata
js = json.loads(raw_metadata[b'pandas'].decode('utf8'))
column_indexes, = js['column_indexes']
assert column_indexes['name'] is None
assert column_indexes['pandas_type'] == 'datetimetz'
assert column_indexes['numpy_type'] == 'datetime64[ns]'
md = column_indexes['metadata']
assert md['timezone'] == 'America/New_York'
def test_datetimetz_row_index(self):
df = pd.DataFrame({
'a': pd.date_range(
start='2017-01-01', periods=3, tz='America/New_York'
)
})
df = df.set_index('a')
_check_pandas_roundtrip(df, preserve_index=True)
def test_categorical_row_index(self):
df = pd.DataFrame({'a': [1, 2, 3], 'b': [1, 2, 3]})
df['a'] = df.a.astype('category')
df = df.set_index('a')
_check_pandas_roundtrip(df, preserve_index=True)
def test_duplicate_column_names_does_not_crash(self):
df = pd.DataFrame([(1, 'a'), (2, 'b')], columns=list('aa'))
with pytest.raises(ValueError):
pa.Table.from_pandas(df)
def test_dictionary_indices_boundscheck(self):
# ARROW-1658. No validation of indices leads to segfaults in pandas
indices = [[0, 1], [0, -1]]
for inds in indices:
arr = pa.DictionaryArray.from_arrays(inds, ['a'], safe=False)
batch = pa.RecordBatch.from_arrays([arr], ['foo'])
table = pa.Table.from_batches([batch, batch, batch])
with pytest.raises(pa.ArrowInvalid):
arr.to_pandas()
with pytest.raises(pa.ArrowInvalid):
table.to_pandas()
def test_unicode_with_unicode_column_and_index(self):
df = pd.DataFrame({u'あ': [u'い']}, index=[u'う'])
_check_pandas_roundtrip(df, preserve_index=True)
def test_mixed_unicode_column_names(self):
df = pd.DataFrame({u'あ': [u'い'], b'a': 1}, index=[u'う'])
# TODO(phillipc): Should this raise?
with pytest.raises(AssertionError):
_check_pandas_roundtrip(df, preserve_index=True)
def test_binary_column_name(self):
column_data = [u'い']
key = u'あ'.encode('utf8')
data = {key: column_data}
df = pd.DataFrame(data)
        # we can't use _check_pandas_roundtrip here because our metadata
# is always decoded as utf8: even if binary goes in, utf8 comes out
t = pa.Table.from_pandas(df, preserve_index=True)
df2 = t.to_pandas()
assert df.values[0] == df2.values[0]
assert df.index.values[0] == df2.index.values[0]
assert df.columns[0] == key
def test_multiindex_duplicate_values(self):
num_rows = 3
numbers = list(range(num_rows))
index = pd.MultiIndex.from_arrays(
[['foo', 'foo', 'bar'], numbers],
names=['foobar', 'some_numbers'],
)
df = pd.DataFrame({'numbers': numbers}, index=index)
table = pa.Table.from_pandas(df)
result_df = table.to_pandas()
tm.assert_frame_equal(result_df, df)
def test_metadata_with_mixed_types(self):
df = pd.DataFrame({'data': [b'some_bytes', u'some_unicode']})
table = pa.Table.from_pandas(df)
metadata = table.schema.metadata
assert b'mixed' not in metadata[b'pandas']
js = json.loads(metadata[b'pandas'].decode('utf8'))
data_column = js['columns'][0]
assert data_column['pandas_type'] == 'bytes'
assert data_column['numpy_type'] == 'object'
def test_list_metadata(self):
df = pd.DataFrame({'data': [[1], [2, 3, 4], [5] * 7]})
schema = pa.schema([pa.field('data', type=pa.list_(pa.int64()))])
table = pa.Table.from_pandas(df, schema=schema)
metadata = table.schema.metadata
assert b'mixed' not in metadata[b'pandas']
js = json.loads(metadata[b'pandas'].decode('utf8'))
data_column = js['columns'][0]
assert data_column['pandas_type'] == 'list[int64]'
assert data_column['numpy_type'] == 'object'
def test_decimal_metadata(self):
expected = pd.DataFrame({
'decimals': [
decimal.Decimal('394092382910493.12341234678'),
-decimal.Decimal('314292388910493.12343437128'),
]
})
table = pa.Table.from_pandas(expected)
metadata = table.schema.metadata
assert b'mixed' not in metadata[b'pandas']
js = json.loads(metadata[b'pandas'].decode('utf8'))
data_column = js['columns'][0]
assert data_column['pandas_type'] == 'decimal'
assert data_column['numpy_type'] == 'object'
assert data_column['metadata'] == {'precision': 26, 'scale': 11}
def test_table_column_subset_metadata(self):
# ARROW-1883
df = pd.DataFrame({
'a': [1, 2, 3],
'b': pd.date_range("2017-01-01", periods=3, tz='Europe/Brussels')})
table = pa.Table.from_pandas(df)
table_subset = table.remove_column(1)
result = table_subset.to_pandas()
tm.assert_frame_equal(result, df[['a']])
table_subset2 = table_subset.remove_column(1)
result = table_subset2.to_pandas()
tm.assert_frame_equal(result, df[['a']])
# non-default index
for index in [
pd.Index(['a', 'b', 'c'], name='index'),
pd.date_range("2017-01-01", periods=3, tz='Europe/Brussels')]:
df = pd.DataFrame({'a': [1, 2, 3],
'b': [.1, .2, .3]}, index=index)
table = pa.Table.from_pandas(df)
table_subset = table.remove_column(1)
result = table_subset.to_pandas()
tm.assert_frame_equal(result, df[['a']])
table_subset2 = table_subset.remove_column(1)
result = table_subset2.to_pandas()
tm.assert_frame_equal(result, df[['a']].reset_index(drop=True))
def test_empty_list_metadata(self):
# Create table with array of empty lists, forced to have type
# list(string) in pyarrow
c1 = [["test"], ["a", "b"], None]
c2 = [[], [], []]
arrays = OrderedDict([
('c1', pa.array(c1, type=pa.list_(pa.string()))),
('c2', pa.array(c2, type=pa.list_(pa.string()))),
])
rb = pa.RecordBatch.from_arrays(
list(arrays.values()),
list(arrays.keys())
)
tbl = pa.Table.from_batches([rb])
# First roundtrip changes schema, because pandas cannot preserve the
# type of empty lists
df = tbl.to_pandas()
tbl2 = pa.Table.from_pandas(df, preserve_index=True)
md2 = json.loads(tbl2.schema.metadata[b'pandas'].decode('utf8'))
# Second roundtrip
df2 = tbl2.to_pandas()
expected = pd.DataFrame(OrderedDict([('c1', c1), ('c2', c2)]))
tm.assert_frame_equal(df2, expected)
assert md2['columns'] == [
{
'name': 'c1',
'field_name': 'c1',
'metadata': None,
'numpy_type': 'object',
'pandas_type': 'list[unicode]',
},
{
'name': 'c2',
'field_name': 'c2',
'metadata': None,
'numpy_type': 'object',
'pandas_type': 'list[empty]',
},
{
'name': None,
'field_name': '__index_level_0__',
'metadata': None,
'numpy_type': 'int64',
'pandas_type': 'int64',
}
]
class TestConvertPrimitiveTypes(object):
"""
Conversion tests for primitive (e.g. numeric) types.
"""
def test_float_no_nulls(self):
data = {}
fields = []
dtypes = [('f2', pa.float16()),
('f4', pa.float32()),
('f8', pa.float64())]
num_values = 100
for numpy_dtype, arrow_dtype in dtypes:
values = np.random.randn(num_values)
data[numpy_dtype] = values.astype(numpy_dtype)
fields.append(pa.field(numpy_dtype, arrow_dtype))
df = pd.DataFrame(data)
schema = pa.schema(fields)
_check_pandas_roundtrip(df, expected_schema=schema)
def test_float_nulls(self):
num_values = 100
null_mask = np.random.randint(0, 10, size=num_values) < 3
dtypes = [('f2', pa.float16()),
('f4', pa.float32()),
('f8', pa.float64())]
names = ['f2', 'f4', 'f8']
expected_cols = []
arrays = []
fields = []
for name, arrow_dtype in dtypes:
values = np.random.randn(num_values).astype(name)
arr = pa.array(values, from_pandas=True, mask=null_mask)
arrays.append(arr)
fields.append(pa.field(name, arrow_dtype))
values[null_mask] = np.nan
expected_cols.append(values)
ex_frame = pd.DataFrame(dict(zip(names, expected_cols)),
columns=names)
table = pa.Table.from_arrays(arrays, names)
assert table.schema.equals(pa.schema(fields))
result = table.to_pandas()
tm.assert_frame_equal(result, ex_frame)
def test_float_nulls_to_ints(self):
# ARROW-2135
df = pd.DataFrame({"a": [1.0, 2.0, pd.np.NaN]})
schema = pa.schema([pa.field("a", pa.int16(), nullable=True)])
table = pa.Table.from_pandas(df, schema=schema, safe=False)
assert table[0].to_pylist() == [1, 2, None]
tm.assert_frame_equal(df, table.to_pandas())
def test_integer_no_nulls(self):
data = OrderedDict()
fields = []
numpy_dtypes = [
('i1', pa.int8()), ('i2', pa.int16()),
('i4', pa.int32()), ('i8', pa.int64()),
('u1', pa.uint8()), ('u2', pa.uint16()),
('u4', pa.uint32()), ('u8', pa.uint64()),
('longlong', pa.int64()), ('ulonglong', pa.uint64())
]
num_values = 100
for dtype, arrow_dtype in numpy_dtypes:
info = np.iinfo(dtype)
values = np.random.randint(max(info.min, np.iinfo(np.int_).min),
min(info.max, np.iinfo(np.int_).max),
size=num_values)
data[dtype] = values.astype(dtype)
fields.append(pa.field(dtype, arrow_dtype))
df = pd.DataFrame(data)
schema = pa.schema(fields)
_check_pandas_roundtrip(df, expected_schema=schema)
def test_all_integer_types(self):
# Test all Numpy integer aliases
data = OrderedDict()
numpy_dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8',
'byte', 'ubyte', 'short', 'ushort', 'intc', 'uintc',
'int_', 'uint', 'longlong', 'ulonglong']
for dtype in numpy_dtypes:
data[dtype] = np.arange(12, dtype=dtype)
df = pd.DataFrame(data)
_check_pandas_roundtrip(df)
# Do the same with pa.array()
# (for some reason, it doesn't use the same code paths at all)
for np_arr in data.values():
arr = pa.array(np_arr)
assert arr.to_pylist() == np_arr.tolist()
def test_integer_with_nulls(self):
# pandas requires upcast to float dtype
int_dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8']
num_values = 100
null_mask = np.random.randint(0, 10, size=num_values) < 3
expected_cols = []
arrays = []
for name in int_dtypes:
values = np.random.randint(0, 100, size=num_values)
arr = pa.array(values, mask=null_mask)
arrays.append(arr)
expected = values.astype('f8')
expected[null_mask] = np.nan
expected_cols.append(expected)
ex_frame = pd.DataFrame(dict(zip(int_dtypes, expected_cols)),
columns=int_dtypes)
table = pa.Table.from_arrays(arrays, int_dtypes)
result = table.to_pandas()
tm.assert_frame_equal(result, ex_frame)
def test_array_from_pandas_type_cast(self):
arr = np.arange(10, dtype='int64')
target_type = pa.int8()
result = pa.array(arr, type=target_type)
expected = pa.array(arr.astype('int8'))
assert result.equals(expected)
def test_boolean_no_nulls(self):
num_values = 100
np.random.seed(0)
df = pd.DataFrame({'bools': np.random.randn(num_values) > 0})
field = pa.field('bools', pa.bool_())
schema = pa.schema([field])
_check_pandas_roundtrip(df, expected_schema=schema)
def test_boolean_nulls(self):
# pandas requires upcast to object dtype
num_values = 100
np.random.seed(0)
mask = np.random.randint(0, 10, size=num_values) < 3
values = np.random.randint(0, 10, size=num_values) < 5
arr = pa.array(values, mask=mask)
expected = values.astype(object)
expected[mask] = None
field = pa.field('bools', pa.bool_())
schema = pa.schema([field])
ex_frame = pd.DataFrame({'bools': expected})
table = pa.Table.from_arrays([arr], ['bools'])
assert table.schema.equals(schema)
result = table.to_pandas()
tm.assert_frame_equal(result, ex_frame)
def test_float_object_nulls(self):
arr = np.array([None, 1.5, np.float64(3.5)] * 5, dtype=object)
df = pd.DataFrame({'floats': arr})
expected = pd.DataFrame({'floats': pd.to_numeric(arr)})
field = pa.field('floats', pa.float64())
schema = pa.schema([field])
_check_pandas_roundtrip(df, expected=expected,
expected_schema=schema)
def test_int_object_nulls(self):
arr = np.array([None, 1, np.int64(3)] * 5, dtype=object)
df = pd.DataFrame({'ints': arr})
expected = pd.DataFrame({'ints': pd.to_numeric(arr)})
field = pa.field('ints', pa.int64())
schema = pa.schema([field])
_check_pandas_roundtrip(df, expected=expected,
expected_schema=schema)
def test_boolean_object_nulls(self):
arr = np.array([False, None, True] * 100, dtype=object)
df = pd.DataFrame({'bools': arr})
field = pa.field('bools', pa.bool_())
schema = pa.schema([field])
_check_pandas_roundtrip(df, expected_schema=schema)
def test_all_nulls_cast_numeric(self):
arr = np.array([None], dtype=object)
def _check_type(t):
a2 = pa.array(arr, type=t)
assert a2.type == t
assert a2[0].as_py() is None
_check_type(pa.int32())
_check_type(pa.float64())
def test_half_floats_from_numpy(self):
arr = np.array([1.5, np.nan], dtype=np.float16)
a = pa.array(arr, type=pa.float16())
x, y = a.to_pylist()
assert isinstance(x, np.float16)
assert x == 1.5
assert isinstance(y, np.float16)
assert np.isnan(y)
a = pa.array(arr, type=pa.float16(), from_pandas=True)
x, y = a.to_pylist()
assert isinstance(x, np.float16)
assert x == 1.5
assert y is None
@pytest.mark.parametrize('dtype',
['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8'])
def test_array_integer_object_nulls_option(dtype):
num_values = 100
null_mask = np.random.randint(0, 10, size=num_values) < 3
values = np.random.randint(0, 100, size=num_values, dtype=dtype)
array = pa.array(values, mask=null_mask)
if null_mask.any():
expected = values.astype('O')
expected[null_mask] = None
else:
expected = values
result = array.to_pandas(integer_object_nulls=True)
np.testing.assert_equal(result, expected)
@pytest.mark.parametrize('dtype',
['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8'])
def test_table_integer_object_nulls_option(dtype):
num_values = 100
null_mask = np.random.randint(0, 10, size=num_values) < 3
values = np.random.randint(0, 100, size=num_values, dtype=dtype)
array = pa.array(values, mask=null_mask)
if null_mask.any():
expected = values.astype('O')
expected[null_mask] = None
else:
expected = values
expected = pd.DataFrame({dtype: expected})
table = pa.Table.from_arrays([array], [dtype])
result = table.to_pandas(integer_object_nulls=True)
tm.assert_frame_equal(result, expected)
class TestConvertDateTimeLikeTypes(object):
"""
Conversion tests for datetime- and timestamp-like types (date64, etc.).
"""
def test_timestamps_notimezone_no_nulls(self):
df = pd.DataFrame({
'datetime64': np.array([
'2007-07-13T01:23:34.123456789',
'2006-01-13T12:34:56.432539784',
'2010-08-13T05:46:57.437699912'],
dtype='datetime64[ns]')
})
field = pa.field('datetime64', pa.timestamp('ns'))
schema = pa.schema([field])
_check_pandas_roundtrip(
df,
expected_schema=schema,
)
def test_timestamps_notimezone_nulls(self):
df = pd.DataFrame({
'datetime64': np.array([
'2007-07-13T01:23:34.123456789',
None,
'2010-08-13T05:46:57.437699912'],
dtype='datetime64[ns]')
})
field = pa.field('datetime64', pa.timestamp('ns'))
schema = pa.schema([field])
_check_pandas_roundtrip(
df,
expected_schema=schema,
)
def test_timestamps_with_timezone(self):
df = pd.DataFrame({
'datetime64': np.array([
'2007-07-13T01:23:34.123',
'2006-01-13T12:34:56.432',
'2010-08-13T05:46:57.437'],
dtype='datetime64[ms]')
})
df['datetime64'] = (df['datetime64'].dt.tz_localize('US/Eastern')
.to_frame())
_check_pandas_roundtrip(df)
_check_series_roundtrip(df['datetime64'])
# drop-in a null and ns instead of ms
df = pd.DataFrame({
'datetime64': np.array([
'2007-07-13T01:23:34.123456789',
None,
'2006-01-13T12:34:56.432539784',
'2010-08-13T05:46:57.437699912'],
dtype='datetime64[ns]')
})
df['datetime64'] = (df['datetime64'].dt.tz_localize('US/Eastern')
.to_frame())
_check_pandas_roundtrip(df)
def test_python_datetime(self):
# ARROW-2106
date_array = [datetime.today() + timedelta(days=x) for x in range(10)]
df = pd.DataFrame({
'datetime': pd.Series(date_array, dtype=object)
})
table = pa.Table.from_pandas(df)
assert isinstance(table[0].data.chunk(0), pa.TimestampArray)
result = table.to_pandas()
expected_df = pd.DataFrame({
'datetime': date_array
})
tm.assert_frame_equal(expected_df, result)
def test_python_datetime_subclass(self):
class MyDatetime(datetime):
# see https://github.com/pandas-dev/pandas/issues/21142
nanosecond = 0.0
date_array = [MyDatetime(2000, 1, 1, 1, 1, 1)]
df = pd.DataFrame({"datetime": pd.Series(date_array, dtype=object)})
table = pa.Table.from_pandas(df)
assert isinstance(table[0].data.chunk(0), pa.TimestampArray)
result = table.to_pandas()
expected_df = pd.DataFrame({"datetime": date_array})
# https://github.com/pandas-dev/pandas/issues/21142
expected_df["datetime"] = pd.to_datetime(expected_df["datetime"])
tm.assert_frame_equal(expected_df, result)
def test_python_date_subclass(self):
class MyDate(date):
pass
date_array = [MyDate(2000, 1, 1)]
df = pd.DataFrame({"date": pd.Series(date_array, dtype=object)})
table = pa.Table.from_pandas(df)
assert isinstance(table[0].data.chunk(0), pa.Date32Array)
result = table.to_pandas()
expected_df = pd.DataFrame(
{"date": np.array(["2000-01-01"], dtype="datetime64[ns]")}
)
tm.assert_frame_equal(expected_df, result)
def test_datetime64_to_date32(self):
# ARROW-1718
arr = pa.array([date(2017, 10, 23), None])
c = pa.Column.from_array("d", arr)
s = c.to_pandas()
arr2 = pa.Array.from_pandas(s, type=pa.date32())
assert arr2.equals(arr.cast('date32'))
@pytest.mark.parametrize('mask', [
None,
np.array([True, False, False]),
])
def test_pandas_datetime_to_date64(self, mask):
s = pd.to_datetime([
'2018-05-10T00:00:00',
'2018-05-11T00:00:00',
'2018-05-12T00:00:00',
])
arr = pa.Array.from_pandas(s, type=pa.date64(), mask=mask)
data = np.array([
date(2018, 5, 10),
date(2018, 5, 11),
date(2018, 5, 12)
])
expected = pa.array(data, mask=mask, type=pa.date64())
assert arr.equals(expected)
@pytest.mark.parametrize('mask', [
None,
np.array([True, False, False])
])
def test_pandas_datetime_to_date64_failures(self, mask):
s = pd.to_datetime([
'2018-05-10T10:24:01',
'2018-05-11T10:24:01',
'2018-05-12T10:24:01',
])
expected_msg = 'Timestamp value had non-zero intraday milliseconds'
with pytest.raises(pa.ArrowInvalid, match=expected_msg):
pa.Array.from_pandas(s, type=pa.date64(), mask=mask)
def test_array_date_as_object(self):
data = [date(2000, 1, 1),
None,
date(1970, 1, 1),
date(2040, 2, 26)]
expected = np.array(['2000-01-01',
None,
'1970-01-01',
'2040-02-26'], dtype='datetime64')
arr = pa.array(data)
assert arr.equals(pa.array(expected))
result = arr.to_pandas()
assert result.dtype == expected.dtype
npt.assert_array_equal(arr.to_pandas(), expected)
result = arr.to_pandas(date_as_object=True)
expected = expected.astype(object)
assert result.dtype == expected.dtype
npt.assert_array_equal(result, expected)
def test_chunked_array_convert_date_as_object(self):
data = [date(2000, 1, 1),
None,
date(1970, 1, 1),
date(2040, 2, 26)]
expected = np.array(['2000-01-01',
None,
'1970-01-01',
'2040-02-26'], dtype='datetime64')
carr = pa.chunked_array([data])
result = carr.to_pandas()
assert result.dtype == expected.dtype
npt.assert_array_equal(carr.to_pandas(), expected)
result = carr.to_pandas(date_as_object=True)
expected = expected.astype(object)
assert result.dtype == expected.dtype
npt.assert_array_equal(result, expected)
def test_column_convert_date_as_object(self):
data = [date(2000, 1, 1),
None,
date(1970, 1, 1),
date(2040, 2, 26)]
expected = np.array(['2000-01-01',
None,
'1970-01-01',
'2040-02-26'], dtype='datetime64')
arr = pa.array(data)
column = pa.column('date', arr)
result = column.to_pandas()
npt.assert_array_equal(column.to_pandas(), expected)
result = column.to_pandas(date_as_object=True)
expected = expected.astype(object)
assert result.dtype == expected.dtype
npt.assert_array_equal(result, expected)
def test_table_convert_date_as_object(self):
df = pd.DataFrame({
'date': [date(2000, 1, 1),
None,
date(1970, 1, 1),
date(2040, 2, 26)]})
table = pa.Table.from_pandas(df, preserve_index=False)
df_datetime = table.to_pandas()
df_object = table.to_pandas(date_as_object=True)
tm.assert_frame_equal(df.astype('datetime64[ns]'), df_datetime,
check_dtype=True)
tm.assert_frame_equal(df, df_object, check_dtype=True)
def test_date_infer(self):
df = pd.DataFrame({
'date': [date(2000, 1, 1),
None,
date(1970, 1, 1),
date(2040, 2, 26)]})
table = pa.Table.from_pandas(df, preserve_index=False)
field = pa.field('date', pa.date32())
# schema's metadata is generated by from_pandas conversion
expected_schema = pa.schema([field], metadata=table.schema.metadata)
assert table.schema.equals(expected_schema)
result = table.to_pandas()
expected = df.copy()
expected['date'] = pd.to_datetime(df['date'])
tm.assert_frame_equal(result, expected)
def test_date_mask(self):
arr = np.array([date(2017, 4, 3), date(2017, 4, 4)],
dtype='datetime64[D]')
mask = [True, False]
result = pa.array(arr, mask=np.array(mask))
expected = np.array([None, date(2017, 4, 4)], dtype='datetime64[D]')
expected = pa.array(expected, from_pandas=True)
assert expected.equals(result)
def test_date_objects_typed(self):
arr = np.array([
date(2017, 4, 3),
None,
date(2017, 4, 4),
date(2017, 4, 5)], dtype=object)
arr_i4 = np.array([17259, -1, 17260, 17261], dtype='int32')
arr_i8 = arr_i4.astype('int64') * 86400000
mask = np.array([False, True, False, False])
t32 = pa.date32()
t64 = pa.date64()
a32 = pa.array(arr, type=t32)
a64 = pa.array(arr, type=t64)
a32_expected = pa.array(arr_i4, mask=mask, type=t32)
a64_expected = pa.array(arr_i8, mask=mask, type=t64)
assert a32.equals(a32_expected)
assert a64.equals(a64_expected)
# Test converting back to pandas
colnames = ['date32', 'date64']
table = pa.Table.from_arrays([a32, a64], colnames)
table_pandas = table.to_pandas()
ex_values = (np.array(['2017-04-03', '2017-04-04', '2017-04-04',
'2017-04-05'],
dtype='datetime64[D]')
.astype('datetime64[ns]'))
ex_values[1] = pd.NaT.value
expected_pandas = pd.DataFrame({'date32': ex_values,
'date64': ex_values},
columns=colnames)
tm.assert_frame_equal(table_pandas, expected_pandas)
def test_dates_from_integers(self):
t1 = pa.date32()
t2 = pa.date64()
arr = np.array([17259, 17260, 17261], dtype='int32')
arr2 = arr.astype('int64') * 86400000
a1 = pa.array(arr, type=t1)
a2 = pa.array(arr2, type=t2)
expected = date(2017, 4, 3)
assert a1[0].as_py() == expected
assert a2[0].as_py() == expected
@pytest.mark.xfail(reason="not supported ATM",
raises=NotImplementedError)
def test_timedelta(self):
        # TODO(jreback): Pandas only supports ns resolution
# Arrow supports ??? for resolution
df = pd.DataFrame({
'timedelta': np.arange(start=0, stop=3 * 86400000,
step=86400000,
dtype='timedelta64[ms]')
})
pa.Table.from_pandas(df)
def test_pytime_from_pandas(self):
pytimes = [time(1, 2, 3, 1356),
time(4, 5, 6, 1356)]
# microseconds
t1 = pa.time64('us')
aobjs = np.array(pytimes + [None], dtype=object)
parr = pa.array(aobjs)
assert parr.type == t1
assert parr[0].as_py() == pytimes[0]
assert parr[1].as_py() == pytimes[1]
assert parr[2] is pa.NA
# DataFrame
df = pd.DataFrame({'times': aobjs})
batch = pa.RecordBatch.from_pandas(df)
assert batch[0].equals(parr)
# Test ndarray of int64 values
arr = np.array([_pytime_to_micros(v) for v in pytimes],
dtype='int64')
a1 = pa.array(arr, type=pa.time64('us'))
assert a1[0].as_py() == pytimes[0]
a2 = pa.array(arr * 1000, type=pa.time64('ns'))
assert a2[0].as_py() == pytimes[0]
a3 = pa.array((arr / 1000).astype('i4'),
type=pa.time32('ms'))
assert a3[0].as_py() == pytimes[0].replace(microsecond=1000)
a4 = pa.array((arr / 1000000).astype('i4'),
type=pa.time32('s'))
assert a4[0].as_py() == pytimes[0].replace(microsecond=0)
def test_arrow_time_to_pandas(self):
pytimes = [time(1, 2, 3, 1356),
time(4, 5, 6, 1356),
time(0, 0, 0)]
expected = np.array(pytimes[:2] + [None])
expected_ms = np.array([x.replace(microsecond=1000)
for x in pytimes[:2]] +
[None])
expected_s = np.array([x.replace(microsecond=0)
for x in pytimes[:2]] +
[None])
arr = np.array([_pytime_to_micros(v) for v in pytimes],
dtype='int64')
null_mask = np.array([False, False, True], dtype=bool)
a1 = pa.array(arr, mask=null_mask, type=pa.time64('us'))
a2 = pa.array(arr * 1000, mask=null_mask,
type=pa.time64('ns'))
a3 = pa.array((arr / 1000).astype('i4'), mask=null_mask,
type=pa.time32('ms'))
a4 = pa.array((arr / 1000000).astype('i4'), mask=null_mask,
type=pa.time32('s'))
names = ['time64[us]', 'time64[ns]', 'time32[ms]', 'time32[s]']
batch = pa.RecordBatch.from_arrays([a1, a2, a3, a4], names)
arr = a1.to_pandas()
assert (arr == expected).all()
arr = a2.to_pandas()
assert (arr == expected).all()
arr = a3.to_pandas()
assert (arr == expected_ms).all()
arr = a4.to_pandas()
assert (arr == expected_s).all()
df = batch.to_pandas()
expected_df = pd.DataFrame({'time64[us]': expected,
'time64[ns]': expected,
'time32[ms]': expected_ms,
'time32[s]': expected_s},
columns=names)
tm.assert_frame_equal(df, expected_df)
def test_numpy_datetime64_columns(self):
datetime64_ns = np.array([
'2007-07-13T01:23:34.123456789',
None,
'2006-01-13T12:34:56.432539784',
'2010-08-13T05:46:57.437699912'],
dtype='datetime64[ns]')
_check_array_from_pandas_roundtrip(datetime64_ns)
datetime64_us = np.array([
'2007-07-13T01:23:34.123456',
None,
'2006-01-13T12:34:56.432539',
'2010-08-13T05:46:57.437699'],
dtype='datetime64[us]')
_check_array_from_pandas_roundtrip(datetime64_us)
datetime64_ms = np.array([
'2007-07-13T01:23:34.123',
None,
'2006-01-13T12:34:56.432',
'2010-08-13T05:46:57.437'],
dtype='datetime64[ms]')
_check_array_from_pandas_roundtrip(datetime64_ms)
datetime64_s = np.array([
'2007-07-13T01:23:34',
None,
'2006-01-13T12:34:56',
'2010-08-13T05:46:57'],
dtype='datetime64[s]')
_check_array_from_pandas_roundtrip(datetime64_s)
@pytest.mark.parametrize('dtype', [pa.date32(), pa.date64()])
def test_numpy_datetime64_day_unit(self, dtype):
datetime64_d = np.array([
'2007-07-13',
None,
'2006-01-15',
'2010-08-19'],
dtype='datetime64[D]')
_check_array_from_pandas_roundtrip(datetime64_d, type=dtype)
def test_array_from_pandas_date_with_mask(self):
m = np.array([True, False, True])
data = pd.Series([
date(1990, 1, 1),
date(1991, 1, 1),
date(1992, 1, 1)
])
result = pa.Array.from_pandas(data, mask=m)
expected = pd.Series([None, date(1991, 1, 1), None])
assert pa.Array.from_pandas(expected).equals(result)
def test_fixed_offset_timezone(self):
df = pd.DataFrame({
'a': [
pd.Timestamp('2012-11-11 00:00:00+01:00'),
pd.NaT
]
})
_check_pandas_roundtrip(df)
_check_serialize_components_roundtrip(df)
# ----------------------------------------------------------------------
# Conversion tests for string and binary types.
class TestConvertStringLikeTypes(object):
def test_pandas_unicode(self):
repeats = 1000
values = [u'foo', None, u'bar', u'mañana', np.nan]
df = pd.DataFrame({'strings': values * repeats})
field = pa.field('strings', pa.string())
schema = pa.schema([field])
_check_pandas_roundtrip(df, expected_schema=schema)
def test_bytes_to_binary(self):
values = [u'qux', b'foo', None, bytearray(b'barz'), 'qux', np.nan]
df = pd.DataFrame({'strings': values})
table = pa.Table.from_pandas(df)
assert table[0].type == pa.binary()
values2 = [b'qux', b'foo', None, b'barz', b'qux', np.nan]
expected = pd.DataFrame({'strings': values2})
_check_pandas_roundtrip(df, expected)
@pytest.mark.large_memory
def test_bytes_exceed_2gb(self):
v1 = b'x' * 100000000
v2 = b'x' * 147483646
# ARROW-2227, hit exactly 2GB on the nose
df = pd.DataFrame({
'strings': [v1] * 20 + [v2] + ['x'] * 20
})
arr = pa.array(df['strings'])
assert isinstance(arr, pa.ChunkedArray)
assert arr.num_chunks == 2
arr = None
table = pa.Table.from_pandas(df)
assert table[0].data.num_chunks == 2
def test_fixed_size_bytes(self):
values = [b'foo', None, bytearray(b'bar'), None, None, b'hey']
df = pd.DataFrame({'strings': values})
schema = pa.schema([pa.field('strings', pa.binary(3))])
table = pa.Table.from_pandas(df, schema=schema)
assert table.schema[0].type == schema[0].type
assert table.schema[0].name == schema[0].name
result = table.to_pandas()
tm.assert_frame_equal(result, df)
def test_fixed_size_bytes_does_not_accept_varying_lengths(self):
values = [b'foo', None, b'ba', None, None, b'hey']
df = pd.DataFrame({'strings': values})
schema = pa.schema([pa.field('strings', pa.binary(3))])
with pytest.raises(pa.ArrowInvalid):
pa.Table.from_pandas(df, schema=schema)
def test_variable_size_bytes(self):
s = pd.Series([b'123', b'', b'a', None])
_check_series_roundtrip(s, type_=pa.binary())
def test_binary_from_bytearray(self):
s = pd.Series([bytearray(b'123'), bytearray(b''), bytearray(b'a'),
None])
# Explicitly set type
_check_series_roundtrip(s, type_=pa.binary())
# Infer type from bytearrays
_check_series_roundtrip(s, expected_pa_type=pa.binary())
def test_table_empty_str(self):
values = ['', '', '', '', '']
df = pd.DataFrame({'strings': values})
field = pa.field('strings', pa.string())
schema = pa.schema([field])
table = pa.Table.from_pandas(df, schema=schema)
result1 = table.to_pandas(strings_to_categorical=False)
expected1 = pd.DataFrame({'strings': values})
tm.assert_frame_equal(result1, expected1, check_dtype=True)
result2 = table.to_pandas(strings_to_categorical=True)
expected2 = pd.DataFrame({'strings': pd.Categorical(values)})
tm.assert_frame_equal(result2, expected2, check_dtype=True)
def test_selective_categoricals(self):
values = ['', '', '', '', '']
df = pd.DataFrame({'strings': values})
field = pa.field('strings', pa.string())
schema = pa.schema([field])
table = pa.Table.from_pandas(df, schema=schema)
expected_str = pd.DataFrame({'strings': values})
expected_cat = pd.DataFrame({'strings': pd.Categorical(values)})
result1 = table.to_pandas(categories=['strings'])
tm.assert_frame_equal(result1, expected_cat, check_dtype=True)
result2 = table.to_pandas(categories=[])
tm.assert_frame_equal(result2, expected_str, check_dtype=True)
result3 = table.to_pandas(categories=('strings',))
tm.assert_frame_equal(result3, expected_cat, check_dtype=True)
result4 = table.to_pandas(categories=tuple())
tm.assert_frame_equal(result4, expected_str, check_dtype=True)
def test_table_str_to_categorical_without_na(self):
values = ['a', 'a', 'b', 'b', 'c']
df = pd.DataFrame({'strings': values})
field = pa.field('strings', pa.string())
schema = pa.schema([field])
table = pa.Table.from_pandas(df, schema=schema)
result = table.to_pandas(strings_to_categorical=True)
expected = pd.DataFrame({'strings': pd.Categorical(values)})
tm.assert_frame_equal(result, expected, check_dtype=True)
with pytest.raises(pa.ArrowInvalid):
table.to_pandas(strings_to_categorical=True,
zero_copy_only=True)
def test_table_str_to_categorical_with_na(self):
values = [None, 'a', 'b', np.nan]
df = pd.DataFrame({'strings': values})
field = pa.field('strings', pa.string())
schema = pa.schema([field])
table = pa.Table.from_pandas(df, schema=schema)
result = table.to_pandas(strings_to_categorical=True)
expected = pd.DataFrame({'strings': pd.Categorical(values)})
tm.assert_frame_equal(result, expected, check_dtype=True)
with pytest.raises(pa.ArrowInvalid):
table.to_pandas(strings_to_categorical=True,
zero_copy_only=True)
# Regression test for ARROW-2101
def test_array_of_bytes_to_strings(self):
converted = pa.array(np.array([b'x'], dtype=object), pa.string())
assert converted.type == pa.string()
# Make sure that if an ndarray of bytes is passed to the array
# constructor and the type is string, it will fail if those bytes
# cannot be converted to utf-8
def test_array_of_bytes_to_strings_bad_data(self):
with pytest.raises(
pa.lib.ArrowInvalid,
match="was not a utf8 string"):
pa.array(np.array([b'\x80\x81'], dtype=object), pa.string())
def test_numpy_string_array_to_fixed_size_binary(self):
arr = np.array([b'foo', b'bar', b'baz'], dtype='|S3')
converted = pa.array(arr, type=pa.binary(3))
expected = pa.array(list(arr), type=pa.binary(3))
assert converted.equals(expected)
mask = np.array([True, False, True])
converted = pa.array(arr, type=pa.binary(3), mask=mask)
expected = pa.array([b'foo', None, b'baz'], type=pa.binary(3))
assert converted.equals(expected)
with pytest.raises(pa.lib.ArrowInvalid,
match=r'Got bytestring of length 3 \(expected 4\)'):
arr = np.array([b'foo', b'bar', b'baz'], dtype='|S3')
pa.array(arr, type=pa.binary(4))
with pytest.raises(
pa.lib.ArrowInvalid,
match=r'Got bytestring of length 12 \(expected 3\)'):
arr = np.array([b'foo', b'bar', b'baz'], dtype='|U3')
pa.array(arr, type=pa.binary(3))
class TestConvertDecimalTypes(object):
"""
Conversion test for decimal types.
"""
decimal32 = [
decimal.Decimal('-1234.123'),
decimal.Decimal('1234.439')
]
decimal64 = [
decimal.Decimal('-129934.123331'),
decimal.Decimal('129534.123731')
]
decimal128 = [
decimal.Decimal('394092382910493.12341234678'),
decimal.Decimal('-314292388910493.12343437128')
]
@pytest.mark.parametrize(('values', 'expected_type'), [
pytest.param(decimal32, pa.decimal128(7, 3), id='decimal32'),
pytest.param(decimal64, pa.decimal128(12, 6), id='decimal64'),
pytest.param(decimal128, pa.decimal128(26, 11), id='decimal128')
])
def test_decimal_from_pandas(self, values, expected_type):
expected = pd.DataFrame({'decimals': values})
table = pa.Table.from_pandas(expected, preserve_index=False)
field = pa.field('decimals', expected_type)
# schema's metadata is generated by from_pandas conversion
expected_schema = pa.schema([field], metadata=table.schema.metadata)
assert table.schema.equals(expected_schema)
@pytest.mark.parametrize('values', [
pytest.param(decimal32, id='decimal32'),
pytest.param(decimal64, id='decimal64'),
pytest.param(decimal128, id='decimal128')
])
def test_decimal_to_pandas(self, values):
expected = pd.DataFrame({'decimals': values})
converted = pa.Table.from_pandas(expected)
df = converted.to_pandas()
tm.assert_frame_equal(df, expected)
def test_decimal_fails_with_truncation(self):
data1 = [decimal.Decimal('1.234')]
type1 = pa.decimal128(10, 2)
with pytest.raises(pa.ArrowInvalid):
pa.array(data1, type=type1)
data2 = [decimal.Decimal('1.2345')]
type2 = pa.decimal128(10, 3)
with pytest.raises(pa.ArrowInvalid):
pa.array(data2, type=type2)
def test_decimal_with_different_precisions(self):
data = [
decimal.Decimal('0.01'),
decimal.Decimal('0.001'),
]
series = pd.Series(data)
array = pa.array(series)
assert array.to_pylist() == data
assert array.type == pa.decimal128(3, 3)
array = pa.array(data, type=pa.decimal128(12, 5))
expected = [decimal.Decimal('0.01000'), decimal.Decimal('0.00100')]
assert array.to_pylist() == expected
def test_decimal_with_None_explicit_type(self):
series = pd.Series([decimal.Decimal('3.14'), None])
_check_series_roundtrip(series, type_=pa.decimal128(12, 5))
# Test that having all None values still produces decimal array
series = pd.Series([None] * 2)
_check_series_roundtrip(series, type_=pa.decimal128(12, 5))
def test_decimal_with_None_infer_type(self):
series = pd.Series([decimal.Decimal('3.14'), None])
_check_series_roundtrip(series, expected_pa_type=pa.decimal128(3, 2))
def test_strided_objects(self, tmpdir):
# see ARROW-3053
data = {
'a': {0: 'a'},
'b': {0: decimal.Decimal('0.0')}
}
# This yields strided objects
df = pd.DataFrame.from_dict(data)
_check_pandas_roundtrip(df)
class TestListTypes(object):
"""
Conversion tests for list<> types.
"""
def test_column_of_arrays(self):
df, schema = dataframe_with_arrays()
_check_pandas_roundtrip(df, schema=schema, expected_schema=schema)
table = pa.Table.from_pandas(df, schema=schema, preserve_index=False)
# schema's metadata is generated by from_pandas conversion
expected_schema = schema.add_metadata(table.schema.metadata)
assert table.schema.equals(expected_schema)
for column in df.columns:
field = schema.field_by_name(column)
_check_array_roundtrip(df[column], type=field.type)
def test_column_of_arrays_to_py(self):
# Test regression in ARROW-1199 not caught in above test
dtype = 'i1'
arr = np.array([
np.arange(10, dtype=dtype),
np.arange(5, dtype=dtype),
None,
np.arange(1, dtype=dtype)
])
type_ = pa.list_(pa.int8())
parr = pa.array(arr, type=type_)
assert parr[0].as_py() == list(range(10))
assert parr[1].as_py() == list(range(5))
assert parr[2].as_py() is None
assert parr[3].as_py() == [0]
def test_column_of_lists(self):
df, schema = dataframe_with_lists()
_check_pandas_roundtrip(df, schema=schema, expected_schema=schema)
table = pa.Table.from_pandas(df, schema=schema, preserve_index=False)
# schema's metadata is generated by from_pandas conversion
expected_schema = schema.add_metadata(table.schema.metadata)
assert table.schema.equals(expected_schema)
for column in df.columns:
field = schema.field_by_name(column)
_check_array_roundtrip(df[column], type=field.type)
def test_column_of_lists_first_empty(self):
# ARROW-2124
num_lists = [[], [2, 3, 4], [3, 6, 7, 8], [], [2]]
series = pd.Series([np.array(s, dtype=float) for s in num_lists])
arr = pa.array(series)
result = pd.Series(arr.to_pandas())
tm.assert_series_equal(result, series)
def test_column_of_lists_chunked(self):
# ARROW-1357
df = pd.DataFrame({
'lists': np.array([
[1, 2],
None,
[2, 3],
[4, 5],
[6, 7],
[8, 9]
], dtype=object)
})
schema = pa.schema([
pa.field('lists', pa.list_(pa.int64()))
])
t1 = pa.Table.from_pandas(df[:2], schema=schema)
t2 = pa.Table.from_pandas(df[2:], schema=schema)
table = pa.concat_tables([t1, t2])
result = table.to_pandas()
tm.assert_frame_equal(result, df)
def test_column_of_lists_chunked2(self):
data1 = [[0, 1], [2, 3], [4, 5], [6, 7], [10, 11],
[12, 13], [14, 15], [16, 17]]
data2 = [[8, 9], [18, 19]]
a1 = pa.array(data1)
a2 = pa.array(data2)
t1 = pa.Table.from_arrays([a1], names=['a'])
t2 = pa.Table.from_arrays([a2], names=['a'])
concatenated = pa.concat_tables([t1, t2])
result = concatenated.to_pandas()
expected = pd.DataFrame({'a': data1 + data2})
tm.assert_frame_equal(result, expected)
def test_column_of_lists_strided(self):
df, schema = dataframe_with_lists()
df = pd.concat([df] * 6, ignore_index=True)
arr = df['int64'].values[::3]
assert arr.strides[0] != 8
_check_array_roundtrip(arr)
def test_nested_lists_all_none(self):
data = np.array([[None, None], None], dtype=object)
arr = pa.array(data)
expected = pa.array(list(data))
assert arr.equals(expected)
assert arr.type == pa.list_(pa.null())
data2 = np.array([None, None, [None, None],
np.array([None, None], dtype=object)],
dtype=object)
arr = pa.array(data2)
expected = pa.array([None, None, [None, None], [None, None]])
assert arr.equals(expected)
def test_nested_lists_all_empty(self):
# ARROW-2128
data = pd.Series([[], [], []])
arr = pa.array(data)
expected = pa.array(list(data))
assert arr.equals(expected)
assert arr.type == pa.list_(pa.null())
def test_nested_list_first_empty(self):
# ARROW-2711
data = pd.Series([[], [u"a"]])
arr = pa.array(data)
expected = pa.array(list(data))
assert arr.equals(expected)
assert arr.type == pa.list_(pa.string())
def test_nested_smaller_ints(self):
# ARROW-1345, ARROW-2008, there were some type inference bugs happening
# before
data = pd.Series([np.array([1, 2, 3], dtype='i1'), None])
result = pa.array(data)
result2 = pa.array(data.values)
expected = pa.array([[1, 2, 3], None], type=pa.list_(pa.int8()))
assert result.equals(expected)
assert result2.equals(expected)
data3 = pd.Series([np.array([1, 2, 3], dtype='f4'), None])
result3 = pa.array(data3)
expected3 = pa.array([[1, 2, 3], None], type=pa.list_(pa.float32()))
assert result3.equals(expected3)
def test_infer_lists(self):
data = OrderedDict([
('nan_ints', [[None, 1], [2, 3]]),
('ints', [[0, 1], [2, 3]]),
('strs', [[None, u'b'], [u'c', u'd']]),
('nested_strs', [[[None, u'b'], [u'c', u'd']], None])
])
df = pd.DataFrame(data)
expected_schema = pa.schema([
pa.field('nan_ints', pa.list_(pa.int64())),
pa.field('ints', pa.list_(pa.int64())),
pa.field('strs', pa.list_(pa.string())),
pa.field('nested_strs', pa.list_(pa.list_(pa.string())))
])
_check_pandas_roundtrip(df, expected_schema=expected_schema)
def test_infer_numpy_array(self):
data = OrderedDict([
('ints', [
np.array([0, 1], dtype=np.int64),
np.array([2, 3], dtype=np.int64)
])
])
df = pd.DataFrame(data)
expected_schema = pa.schema([
pa.field('ints', pa.list_(pa.int64()))
])
_check_pandas_roundtrip(df, expected_schema=expected_schema)
@pytest.mark.parametrize('t,data,expected', [
(
pa.int64,
[[1, 2], [3], None],
[None, [3], None]
),
(
pa.string,
[[u'aaa', u'bb'], [u'c'], None],
[None, [u'c'], None]
),
(
pa.null,
[[None, None], [None], None],
[None, [None], None]
)
])
def test_array_from_pandas_typed_array_with_mask(self, t, data, expected):
m = np.array([True, False, True])
s = pd.Series(data)
result = pa.Array.from_pandas(s, mask=m, type=pa.list_(t()))
assert pa.Array.from_pandas(expected,
type=pa.list_(t())).equals(result)
def test_empty_list_roundtrip(self):
empty_list_array = np.empty((3,), dtype=object)
empty_list_array.fill([])
df = pd.DataFrame({'a': np.array(['1', '2', '3']),
'b': empty_list_array})
tbl = pa.Table.from_pandas(df)
result = tbl.to_pandas()
tm.assert_frame_equal(result, df)
def test_array_from_nested_arrays(self):
df, schema = dataframe_with_arrays()
for field in schema:
arr = df[field.name].values
expected = pa.array(list(arr), type=field.type)
result = pa.array(arr)
assert result.type == field.type # == list<scalar>
assert result.equals(expected)
class TestConvertStructTypes(object):
"""
Conversion tests for struct types.
"""
def test_to_pandas(self):
ints = pa.array([None, 2, 3], type=pa.int64())
strs = pa.array([u'a', None, u'c'], type=pa.string())
bools = pa.array([True, False, None], type=pa.bool_())
arr = pa.StructArray.from_arrays(
[ints, strs, bools],
['ints', 'strs', 'bools'])
expected = pd.Series([
{'ints': None, 'strs': u'a', 'bools': True},
{'ints': 2, 'strs': None, 'bools': False},
{'ints': 3, 'strs': u'c', 'bools': None},
])
series = pd.Series(arr.to_pandas())
tm.assert_series_equal(series, expected)
def test_from_numpy(self):
dt = np.dtype([('x', np.int32),
(('y_title', 'y'), np.bool_)])
ty = pa.struct([pa.field('x', pa.int32()),
pa.field('y', pa.bool_())])
data = np.array([], dtype=dt)
arr = pa.array(data, type=ty)
assert arr.to_pylist() == []
data = np.array([(42, True), (43, False)], dtype=dt)
arr = pa.array(data, type=ty)
assert arr.to_pylist() == [{'x': 42, 'y': True},
{'x': 43, 'y': False}]
# With mask
arr = pa.array(data, mask=np.bool_([False, True]), type=ty)
assert arr.to_pylist() == [{'x': 42, 'y': True}, None]
# Trivial struct type
dt = np.dtype([])
ty = pa.struct([])
data = np.array([], dtype=dt)
arr = pa.array(data, type=ty)
assert arr.to_pylist() == []
data = np.array([(), ()], dtype=dt)
arr = pa.array(data, type=ty)
assert arr.to_pylist() == [{}, {}]
def test_from_numpy_nested(self):
dt = np.dtype([('x', np.dtype([('xx', np.int8),
('yy', np.bool_)])),
('y', np.int16)])
ty = pa.struct([pa.field('x', pa.struct([pa.field('xx', pa.int8()),
pa.field('yy', pa.bool_())])),
pa.field('y', pa.int16())])
data = np.array([], dtype=dt)
arr = pa.array(data, type=ty)
assert arr.to_pylist() == []
data = np.array([((1, True), 2), ((3, False), 4)], dtype=dt)
arr = pa.array(data, type=ty)
assert arr.to_pylist() == [{'x': {'xx': 1, 'yy': True}, 'y': 2},
{'x': {'xx': 3, 'yy': False}, 'y': 4}]
@pytest.mark.large_memory
def test_from_numpy_large(self):
# Exercise rechunking + nulls
        target_size = 3 * 1024**3  # 3GB
dt = np.dtype([('x', np.float64), ('y', 'object')])
bs = 65536 - dt.itemsize
block = b'.' * bs
n = target_size // (bs + dt.itemsize)
data = np.zeros(n, dtype=dt)
data['x'] = np.random.random_sample(n)
data['y'] = block
# Add implicit nulls
data['x'][data['x'] < 0.2] = np.nan
ty = pa.struct([pa.field('x', pa.float64()),
pa.field('y', pa.binary(bs))])
arr = pa.array(data, type=ty, from_pandas=True)
assert arr.num_chunks == 2
def iter_chunked_array(arr):
for chunk in arr.iterchunks():
for item in chunk:
yield item
def check(arr, data, mask=None):
assert len(arr) == len(data)
xs = data['x']
ys = data['y']
for i, obj in enumerate(iter_chunked_array(arr)):
try:
d = obj.as_py()
if mask is not None and mask[i]:
assert d is None
else:
x = xs[i]
if np.isnan(x):
assert d['x'] is None
else:
assert d['x'] == x
assert d['y'] == ys[i]
except Exception:
print("Failed at index", i)
raise
check(arr, data)
del arr
# Now with explicit mask
mask = np.random.random_sample(n) < 0.2
arr = pa.array(data, type=ty, mask=mask, from_pandas=True)
assert arr.num_chunks == 2
check(arr, data, mask)
del arr
def test_from_numpy_bad_input(self):
ty = pa.struct([pa.field('x', pa.int32()),
pa.field('y', pa.bool_())])
dt = np.dtype([('x', np.int32),
('z', np.bool_)])
data = np.array([], dtype=dt)
with pytest.raises(TypeError,
match="Missing field 'y'"):
pa.array(data, type=ty)
data = np.int32([])
with pytest.raises(TypeError,
match="Expected struct array"):
pa.array(data, type=ty)
class TestZeroCopyConversion(object):
"""
Tests that zero-copy conversion works with some types.
"""
def test_zero_copy_success(self):
result = pa.array([0, 1, 2]).to_pandas(zero_copy_only=True)
npt.assert_array_equal(result, [0, 1, 2])
def test_zero_copy_dictionaries(self):
arr = pa.DictionaryArray.from_arrays(
np.array([0, 0]),
np.array([5]))
result = arr.to_pandas(zero_copy_only=True)
values = pd.Categorical([5, 5])
tm.assert_series_equal(pd.Series(result), pd.Series(values),
check_names=False)
def check_zero_copy_failure(self, arr):
with pytest.raises(pa.ArrowInvalid):
arr.to_pandas(zero_copy_only=True)
def test_zero_copy_failure_on_object_types(self):
self.check_zero_copy_failure(pa.array(['A', 'B', 'C']))
def test_zero_copy_failure_with_int_when_nulls(self):
self.check_zero_copy_failure(pa.array([0, 1, None]))
def test_zero_copy_failure_with_float_when_nulls(self):
self.check_zero_copy_failure(pa.array([0.0, 1.0, None]))
def test_zero_copy_failure_on_bool_types(self):
self.check_zero_copy_failure(pa.array([True, False]))
def test_zero_copy_failure_on_list_types(self):
arr = pa.array([[1, 2], [8, 9]], type=pa.list_(pa.int64()))
self.check_zero_copy_failure(arr)
def test_zero_copy_failure_on_timestamp_types(self):
arr = np.array(['2007-07-13'], dtype='datetime64[ns]')
self.check_zero_copy_failure(pa.array(arr))
# This function must be at the top-level for Python 2.7's multiprocessing
def _non_threaded_conversion():
df = _alltypes_example()
_check_pandas_roundtrip(df, use_threads=False)
_check_pandas_roundtrip(df, use_threads=False, as_batch=True)
def _threaded_conversion():
df = _alltypes_example()
_check_pandas_roundtrip(df, use_threads=True)
_check_pandas_roundtrip(df, use_threads=True, as_batch=True)
class TestConvertMisc(object):
"""
Miscellaneous conversion tests.
"""
type_pairs = [
(np.int8, pa.int8()),
(np.int16, pa.int16()),
(np.int32, pa.int32()),
(np.int64, pa.int64()),
(np.uint8, pa.uint8()),
(np.uint16, pa.uint16()),
(np.uint32, pa.uint32()),
(np.uint64, pa.uint64()),
(np.float16, pa.float16()),
(np.float32, pa.float32()),
(np.float64, pa.float64()),
# XXX unsupported
# (np.dtype([('a', 'i2')]), pa.struct([pa.field('a', pa.int16())])),
(np.object, pa.string()),
(np.object, pa.binary()),
(np.object, pa.binary(10)),
(np.object, pa.list_(pa.int64())),
]
def test_all_none_objects(self):
df = pd.DataFrame({'a': [None, None, None]})
_check_pandas_roundtrip(df)
def test_all_none_category(self):
df = pd.DataFrame({'a': [None, None, None]})
df['a'] = df['a'].astype('category')
_check_pandas_roundtrip(df)
def test_empty_arrays(self):
for dtype, pa_type in self.type_pairs:
arr = np.array([], dtype=dtype)
_check_array_roundtrip(arr, type=pa_type)
def test_non_threaded_conversion(self):
_non_threaded_conversion()
def test_threaded_conversion_multiprocess(self):
# Parallel conversion should work from child processes too (ARROW-2963)
pool = mp.Pool(2)
try:
pool.apply(_threaded_conversion)
finally:
pool.close()
pool.join()
def test_category(self):
repeats = 5
v1 = ['foo', None, 'bar', 'qux', np.nan]
v2 = [4, 5, 6, 7, 8]
v3 = [b'foo', None, b'bar', b'qux', np.nan]
arrays = {
'cat_strings': | pd.Categorical(v1 * repeats) | pandas.Categorical |
# -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Author: <NAME>
date: 2019/12/7 15:55
contact: <EMAIL>
desc: Fetch air quality data from the air quality online monitoring and analysis platform
https://www.aqistudy.cn/
This part references https://github.com/PKUJohnson/OpenData/tree/master/opendatatools/aqi2
Thanks to PKUJohnson for providing the encryption/decryption functions
"""
import base64
import hashlib
import json
import time
import pandas as pd
import requests
from bs4 import BeautifulSoup
from akshare.air.aqi_utils import *
def air_hourly(city="上海", date="2019-12-05"):
start_time = "%s 00:00:00" % date
end_time = "%s 23:59:59" % date
temp_df = get_server_data(city, "HOUR", start_time, end_time)
return temp_df.transform(pd.to_numeric)
def air_daily(city="上海", start_date="2019-11-01", end_date="2019-12-01"):
start_time = "%s 00:00:00" % start_date
end_time = "%s 23:59:59" % end_date
temp_df = get_server_data(city, "DAY", start_time, end_time)
return temp_df.transform(pd.to_numeric)
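# Hedged usage sketch (assumes this module is importable and https://www.aqistudy.cn/ is
# reachable; city names are passed in Chinese, as in the defaults above):
#
#   hourly_df = air_hourly(city="上海", date="2019-12-05")
#   daily_df = air_daily(city="上海", start_date="2019-11-01", end_date="2019-12-01")
#   print(daily_df.head())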
def get_server_data(city, period_type, start_time, end_time):
url = "https://www.aqistudy.cn/apinew/aqistudyapi.php"
app_id = "1a45f75b824b2dc628d5955356b5ef18"
method = "GETDETAIL"
timestamp = int(time.time() * 1000)
client_type = "WEB"
object_dict = {"city": city, "type": period_type, "startTime": start_time, "endTime": end_time}
secret_key = app_id + method + str(
timestamp) + client_type + "{\"city\":\"%s\",\"endTime\":\"%s\",\"startTime\":\"%s\",\"type\":\"%s\"}" % (
object_dict["city"], object_dict["endTime"], object_dict["startTime"], object_dict["type"]
)
secret = hashlib.md5(secret_key.encode("utf8")).hexdigest()
payload = {
"appId": app_id,
"method": method,
"timestamp": timestamp,
"clienttype": client_type,
"object": object_dict,
"secret": secret
}
payload = base64.standard_b64encode(json.dumps(payload).encode("utf8")).decode()
payload = aes_encrypt(real_aes_client_key, real_aes_client_iv, payload)
response = requests.post(url, data={"d": payload})
if response is None:
        return None, "failed to fetch data"
# data = base64.standard_b64decode(response.encode("utf8")).decode()
data = decrypt_response(real_des_key, real_des_iv, real_aes_server_key, real_aes_server_iv, response.text)
json_obj = json.loads(data)
success = json_obj["success"]
errcode = json_obj["errcode"]
errmsg = json_obj["errmsg"]
if errcode != 0:
return None, errmsg
result_rows = json_obj["result"]["data"]["rows"]
result_df = | pd.DataFrame(result_rows) | pandas.DataFrame |
import logging
from typing import Dict, Iterator, List, Optional, Set, Tuple
import pandas as pd
import sqlalchemy
from pandas import DataFrame
from crawler.constants import FIELD_PLATE_BARCODE, FIELD_ROOT_SAMPLE_ID
from crawler.sql_queries import SQL_MLWH_GET_CP_SAMPLES, SQL_MLWH_GET_CP_SAMPLES_BY_DATE
from crawler.types import Config, SampleDoc
logger = logging.getLogger(__name__)
def extract_required_cp_info(samples: List[SampleDoc]) -> Tuple[Set[str], Set[str]]:
root_sample_ids = set()
plate_barcodes = set()
for sample in samples:
root_sample_ids.add(str(sample[FIELD_ROOT_SAMPLE_ID]))
plate_barcodes.add(str(sample[FIELD_PLATE_BARCODE]))
return root_sample_ids, plate_barcodes
def filter_out_cherrypicked_samples(config: Config, samples: List[SampleDoc]) -> List[SampleDoc]:
"""Filters an input list of samples for those that have not been cherrypicked.
Arguments:
config {Config} -- application config specifying database details
samples {List[Sample]} -- the list of samples to filter
Returns:
List[Sample] -- non-cherrypicked samples
"""
root_sample_ids, plate_barcodes = extract_required_cp_info(samples)
cp_samples_df = get_cherrypicked_samples(config, list(root_sample_ids), list(plate_barcodes))
if cp_samples_df is None:
        raise Exception("Unable to determine cherry-picked samples - potentially an error connecting to MySQL")
elif not cp_samples_df.empty:
cp_samples = cp_samples_df[[FIELD_ROOT_SAMPLE_ID, FIELD_PLATE_BARCODE]].to_numpy().tolist()
return remove_cherrypicked_samples(samples, cp_samples)
else:
return samples
def get_cherrypicked_samples(
config: Config,
root_sample_ids: List[str],
plate_barcodes: List[str],
chunk_size: int = 50000,
) -> Optional[DataFrame]:
"""Find which samples have been cherrypicked using MLWH & Events warehouse.
    Returns a dataframe with 4 columns: those needed to uniquely identify a sample. The resulting
    dataframe only contains those samples that have been cherrypicked (those that have an entry
    for the relevant event type in the events warehouse).
    Args:
        root_sample_ids (List[str]): root sample ids of the samples to check
        plate_barcodes (List[str]): plate barcodes of the samples to check
        chunk_size (int, optional): number of root sample ids per database query. Defaults to 50000.
    Returns:
        DataFrame: the cherrypicked samples found, or None if the database queries failed
"""
logger.debug("Getting cherry-picked samples from MLWH")
return build_data_frame_from_database_queries_aggregation(
database_connection_uri(config),
SQL_MLWH_GET_CP_SAMPLES,
params_for_cherrypicked_samples_query(root_sample_ids, plate_barcodes, chunk_size),
)
def get_cherrypicked_samples_by_date(
config: Config,
root_sample_ids: List[str],
plate_barcodes: List[str],
start_date: str,
end_date: str,
chunk_size: int = 50000,
) -> Optional[DataFrame]:
"""Find which samples have been cherrypicked between defined dates using MLWH & Events warehouse.
    Returns a dataframe with 4 columns: those needed to uniquely identify a sample. The resulting
    dataframe only contains those samples that have been cherrypicked (those that have an entry
    for the relevant event type in the events warehouse).
    Args:
        config (Config): application config specifying database details
        root_sample_ids (List[str]): root sample ids of the samples to check
        plate_barcodes (List[str]): plate barcodes of the samples to check
        start_date (str): lower limit on creation date
        end_date (str): upper limit on creation date
        chunk_size (int, optional): number of root sample ids per database query. Defaults to 50000.
    Returns:
        DataFrame: the cherrypicked samples found, or None if the database queries failed
"""
logger.debug("Getting cherry-picked samples from MLWH")
return build_data_frame_from_database_queries_aggregation(
database_connection_uri(config),
SQL_MLWH_GET_CP_SAMPLES_BY_DATE,
params_for_cherrypicked_samples_by_date_query(
root_sample_ids, plate_barcodes, start_date, end_date, chunk_size
),
)
def remove_cherrypicked_samples(samples: List[SampleDoc], cherry_picked_samples: List[List[str]]) -> List[SampleDoc]:
"""Remove samples that have been cherry-picked. We need to check on (root sample id, plate barcode) combo rather
    than just the root sample id, as multiple samples can exist with the same root sample id, with the potential
    for one being cherry-picked and one not.
Args:
samples (List[Sample]): List of samples in the shape of mongo documents
cherry_picked_samples (List[List[str]]): 2 dimensional list of cherry-picked samples with root sample id and
plate barcodes for each.
Returns:
List[Sample]: The original list of samples minus the cherry-picked samples.
"""
cherry_picked_sets = [{cp_sample[0], cp_sample[1]} for cp_sample in cherry_picked_samples]
filtered = filter(
lambda sample: {sample[FIELD_ROOT_SAMPLE_ID], sample[FIELD_PLATE_BARCODE]} not in cherry_picked_sets,
samples,
)
return list(filtered)
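# Hedged illustration of the (root sample id, plate barcode) matching above, with hypothetical
# values (FIELD_ROOT_SAMPLE_ID / FIELD_PLATE_BARCODE are the field names imported at the top):
#
#   samples = [
#       {FIELD_ROOT_SAMPLE_ID: "R1", FIELD_PLATE_BARCODE: "P1"},  # cherrypicked -> removed
#       {FIELD_ROOT_SAMPLE_ID: "R1", FIELD_PLATE_BARCODE: "P2"},  # same root id, other plate -> kept
#   ]
#   remove_cherrypicked_samples(samples, [["R1", "P1"]])
#   # -> [{FIELD_ROOT_SAMPLE_ID: "R1", FIELD_PLATE_BARCODE: "P2"}]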
def params_for_cherrypicked_samples_query(
root_sample_ids: List[str],
plate_barcodes: List[str],
chunk_size: int = 50000,
) -> Iterator[Dict]:
for x in range(0, len(root_sample_ids), chunk_size):
yield {
"root_sample_ids": tuple(root_sample_ids[x : (x + chunk_size)]), # noqa: E203
"plate_barcodes": plate_barcodes,
}
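# Hedged example of the chunking behaviour above (hypothetical ids, chunk_size=2):
#
#   list(params_for_cherrypicked_samples_query(["r1", "r2", "r3"], ["p1"], chunk_size=2))
#   # -> [{"root_sample_ids": ("r1", "r2"), "plate_barcodes": ["p1"]},
#   #     {"root_sample_ids": ("r3",), "plate_barcodes": ["p1"]}]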
def params_for_cherrypicked_samples_by_date_query(
root_sample_ids: List[str],
plate_barcodes: List[str],
start_date: str,
end_date: str,
chunk_size: int = 50000,
) -> Iterator[Dict]:
# TODO: Use [dictionary union](https://www.python.org/dev/peps/pep-0584/) when upgrading to Python 3.9
for params in params_for_cherrypicked_samples_query(root_sample_ids, plate_barcodes, chunk_size):
yield {
**params,
**{
"start_date": start_date,
"end_date": end_date,
},
}
def build_data_frame_from_database_queries_aggregation(
database_connection_uri: str, query_template: str, params_iterator: Iterator[Dict]
) -> Optional[DataFrame]:
try:
db_connection = None
logger.debug("Getting cherry-picked samples from MLWH")
# Create an empty DataFrame to merge into
concat_frame = pd.DataFrame()
sql_engine = sqlalchemy.create_engine(database_connection_uri, pool_recycle=3600)
db_connection = sql_engine.connect()
for params in params_iterator:
data_frame = | pd.read_sql(query_template, db_connection, params=params) | pandas.read_sql |
import pandas as pd
import xlrd
from plconv.models.supplier import ConvertParams
def avis(params: ConvertParams):
result_df = None
for param in params.in_params:
in_path = param.filepath
supplier = param.data.get('supplier', '-')
try:
xl = pd.ExcelFile(in_path)
except UnicodeDecodeError:
xlrd_book = xlrd.open_workbook(in_path, on_demand=True, encoding_override="cp1251")
xl = | pd.ExcelFile(xlrd_book) | pandas.ExcelFile |
# -*- coding: utf-8 -*-
import csv
import os
import platform
import codecs
import re
import sys
from datetime import datetime
import pytest
import numpy as np
from pandas._libs.lib import Timestamp
import pandas as pd
import pandas.util.testing as tm
from pandas import DataFrame, Series, Index, MultiIndex
from pandas import compat
from pandas.compat import (StringIO, BytesIO, PY3,
range, lrange, u)
from pandas.errors import DtypeWarning, EmptyDataError, ParserError
from pandas.io.common import URLError
from pandas.io.parsers import TextFileReader, TextParser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
# Parsers support only length-1 decimals
msg = 'Only length-1 decimal markers supported'
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(data), decimal='')
def test_bad_stream_exception(self):
# Issue 13652:
# This test validates that both python engine
# and C engine will raise UnicodeDecodeError instead of
# c engine raising ParserError and swallowing exception
# that caused read to fail.
handle = open(self.csv_shiftjs, "rb")
codec = codecs.lookup("utf-8")
utf8 = codecs.lookup('utf-8')
# stream must be binary UTF8
stream = codecs.StreamRecoder(
handle, utf8.encode, utf8.decode, codec.streamreader,
codec.streamwriter)
if compat.PY3:
msg = "'utf-8' codec can't decode byte"
else:
msg = "'utf8' codec can't decode byte"
with tm.assert_raises_regex(UnicodeDecodeError, msg):
self.read_csv(stream)
stream.close()
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
self.read_csv(fname, index_col=0, parse_dates=True)
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
assert isinstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# see gh-8217
# Series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
assert not result._is_view
def test_malformed(self):
# see gh-6607
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 4, saw 5'
with tm.assert_raises_regex(Exception, msg):
self.read_table(StringIO(data), sep=',',
header=1, comment='#')
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
it.read(5)
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
it.read(3)
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
it.read()
# skipfooter is not supported with the C parser yet
if self.engine == 'python':
# skipfooter
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
msg = 'Expected 3 fields in line 4, saw 5'
with tm.assert_raises_regex(Exception, msg):
self.read_table(StringIO(data), sep=',',
header=1, comment='#',
skipfooter=1)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>""" # noqa
pytest.raises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
assert len(df) == 3
def test_unnamed_columns(self):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = np.array([[1, 2, 3, 4, 5],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]], dtype=np.int64)
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
tm.assert_index_equal(df.columns,
Index(['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4']))
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
expected = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 3, 4],
'C': [2, 4, 5]})
out = self.read_csv(StringIO(data))
tm.assert_frame_equal(out, expected)
def test_read_csv_dataframe(self):
df = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv1, sep=',', index_col=0,
parse_dates=True)
tm.assert_index_equal(df.columns, pd.Index(['A', 'B', 'C', 'D']))
assert df.index.name == 'index'
assert isinstance(
df.index[0], (datetime, np.datetime64, Timestamp))
assert df.values.dtype == np.float64
tm.assert_frame_equal(df, df2)
def test_read_csv_no_index_name(self):
df = self.read_csv(self.csv2, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv2, sep=',', index_col=0,
parse_dates=True)
tm.assert_index_equal(df.columns,
pd.Index(['A', 'B', 'C', 'D', 'E']))
assert isinstance(df.index[0], (datetime, np.datetime64, Timestamp))
assert df.loc[:, ['A', 'B', 'C', 'D']].values.dtype == np.float64
tm.assert_frame_equal(df, df2)
def test_read_table_unicode(self):
fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8'))
df1 = self.read_table(fin, sep=";", encoding="utf-8", header=None)
assert isinstance(df1[0].values[0], compat.text_type)
def test_read_table_wrong_num_columns(self):
# too few!
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
pytest.raises(ValueError, self.read_csv, StringIO(data))
def test_read_duplicate_index_explicit(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
result = self.read_csv(StringIO(data), index_col=0)
expected = self.read_csv(StringIO(data)).set_index(
'index', verify_integrity=False)
tm.assert_frame_equal(result, expected)
result = self.read_table(StringIO(data), sep=',', index_col=0)
expected = self.read_table(StringIO(data), sep=',', ).set_index(
'index', verify_integrity=False)
tm.assert_frame_equal(result, expected)
def test_read_duplicate_index_implicit(self):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
# make sure an error isn't thrown
self.read_csv(StringIO(data))
self.read_table(StringIO(data), sep=',')
def test_parse_bools(self):
data = """A,B
True,1
False,2
True,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.bool_
data = """A,B
YES,1
no,2
yes,3
No,3
Yes,3
"""
data = self.read_csv(StringIO(data),
true_values=['yes', 'Yes', 'YES'],
false_values=['no', 'NO', 'No'])
assert data['A'].dtype == np.bool_
data = """A,B
TRUE,1
FALSE,2
TRUE,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.bool_
data = """A,B
foo,bar
bar,foo"""
result = self.read_csv(StringIO(data), true_values=['foo'],
false_values=['bar'])
expected = DataFrame({'A': [True, False], 'B': [False, True]})
tm.assert_frame_equal(result, expected)
def test_int_conversion(self):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.float64
assert data['B'].dtype == np.int64
def test_read_nrows(self):
expected = self.read_csv(StringIO(self.data1))[:3]
df = self.read_csv(StringIO(self.data1), nrows=3)
tm.assert_frame_equal(df, expected)
# see gh-10476
df = self.read_csv(StringIO(self.data1), nrows=3.0)
tm.assert_frame_equal(df, expected)
msg = r"'nrows' must be an integer >=0"
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows=1.2)
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows='foo')
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows=-1)
def test_read_chunksize(self):
reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# with invalid chunksize value:
msg = r"'chunksize' must be an integer >=1"
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), chunksize=1.3)
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), chunksize='foo')
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), chunksize=0)
def test_read_chunksize_and_nrows(self):
# gh-15755
# With nrows
reader = self.read_csv(StringIO(self.data1), index_col=0,
chunksize=2, nrows=5)
df = self.read_csv(StringIO(self.data1), index_col=0, nrows=5)
tm.assert_frame_equal(pd.concat(reader), df)
# chunksize > nrows
reader = self.read_csv(StringIO(self.data1), index_col=0,
chunksize=8, nrows=5)
df = self.read_csv(StringIO(self.data1), index_col=0, nrows=5)
tm.assert_frame_equal(pd.concat(reader), df)
# with changing "size":
reader = self.read_csv(StringIO(self.data1), index_col=0,
chunksize=8, nrows=5)
df = self.read_csv(StringIO(self.data1), index_col=0, nrows=5)
tm.assert_frame_equal(reader.get_chunk(size=2), df.iloc[:2])
tm.assert_frame_equal(reader.get_chunk(size=4), df.iloc[2:5])
with pytest.raises(StopIteration):
reader.get_chunk(size=3)
def test_read_chunksize_named(self):
reader = self.read_csv(
StringIO(self.data1), index_col='index', chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col='index')
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_get_chunk_passed_chunksize(self):
data = """A,B,C
1,2,3
4,5,6
7,8,9
1,2,3"""
result = self.read_csv(StringIO(data), chunksize=2)
piece = result.get_chunk()
assert len(piece) == 2
def test_read_chunksize_generated_index(self):
# GH 12185
reader = self.read_csv(StringIO(self.data1), chunksize=2)
df = self.read_csv(StringIO(self.data1))
tm.assert_frame_equal(pd.concat(reader), df)
reader = self.read_csv(StringIO(self.data1), chunksize=2, index_col=0)
df = self.read_csv(StringIO(self.data1), index_col=0)
tm.assert_frame_equal(pd.concat(reader), df)
def test_read_text_list(self):
data = """A,B,C\nfoo,1,2,3\nbar,4,5,6"""
as_list = [['A', 'B', 'C'], ['foo', '1', '2', '3'], ['bar',
'4', '5', '6']]
df = self.read_csv(StringIO(data), index_col=0)
parser = TextParser(as_list, index_col=0, chunksize=2)
chunk = parser.read(None)
tm.assert_frame_equal(chunk, df)
def test_iterator(self):
# See gh-6607
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
| tm.assert_frame_equal(chunks[1], df[2:4]) | pandas.util.testing.assert_frame_equal |
import os
import pandas as pd
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import random_split, DataLoader, Dataset
import torchvision
from torchvision import transforms
from sklearn.model_selection import train_test_split
from pytorchcv.model_provider import get_model
from PIL import Image
from tqdm import tqdm
data_0 = pd.read_csv('fold_0_data.txt', delimiter='\t')
data_1 = pd.read_csv('fold_1_data.txt', delimiter='\t')
data_2 = pd.read_csv('fold_2_data.txt', delimiter='\t')
data_3 = pd.read_csv('fold_3_data.txt', delimiter='\t')
data_4 = pd.read_csv('fold_4_data.txt', delimiter='\t')
data = [data_0, data_1, data_2, data_3, data_4]
data = | pd.concat(data) | pandas.concat |
#Libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('whitegrid')
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.calibration import CalibratedClassifierCV
import xgboost as xgb
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import StratifiedKFold
from sklearn.feature_selection import SelectFromModel
from sklearn.linear_model import LogisticRegression
from sklearn import svm
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.info()
train.describe(include='all')
train.head()
plt.subplot(1,4,1)
train.groupby('type').mean()['rotting_flesh'].plot(kind='bar',figsize=(7,4), color='r')
plt.subplot(1,4,2)
train.groupby('type').mean()['bone_length'].plot(kind='bar',figsize=(7,4), color='g')
plt.subplot(1,4,3)
train.groupby('type').mean()['hair_length'].plot(kind='bar',figsize=(7,4), color='y')
plt.subplot(1,4,4)
train.groupby('type').mean()['has_soul'].plot(kind='bar',figsize=(7,4), color='teal')
sns.factorplot("type", col="color", col_wrap=4, data=train, kind="count", size=2.4, aspect=.8)
#The graphs look much better with higher figsize.
fig, ax = plt.subplots(2, 2, figsize = (16, 12))
sns.pointplot(x="color", y="rotting_flesh", hue="type", data=train, ax = ax[0, 0])
sns.pointplot(x="color", y="bone_length", hue="type", data=train, ax = ax[0, 1])
sns.pointplot(x="color", y="hair_length", hue="type", data=train, ax = ax[1, 0])
sns.pointplot(x="color", y="has_soul", hue="type", data=train, ax = ax[1, 1])
sns.pairplot(train, hue='type')
train['hair_soul'] = train['hair_length'] * train['has_soul']
train['hair_bone'] = train['hair_length'] * train['bone_length']
test['hair_soul'] = test['hair_length'] * test['has_soul']
test['hair_bone'] = test['hair_length'] * test['bone_length']
train['hair_soul_bone'] = train['hair_length'] * train['has_soul'] * train['bone_length']
test['hair_soul_bone'] = test['hair_length'] * test['has_soul'] * test['bone_length']
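# The products above act as explicit interaction features, letting the models use the joint
# effect of hair_length, has_soul and bone_length directly.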
#test_id will be used later, so save it
test_id = test['id']
train.drop(['id'], axis=1, inplace=True)
test.drop(['id'], axis=1, inplace=True)
#Deal with 'color' column
col = 'color'
dummies = pd.get_dummies(train[col], drop_first=False)
dummies = dummies.add_prefix("{}#".format(col))
train.drop(col, axis=1, inplace=True)
train = train.join(dummies)
dummies = pd.get_dummies(test[col], drop_first=False)
dummies = dummies.add_prefix("{}#".format(col))
test.drop(col, axis=1, inplace=True)
test = test.join(dummies)
X_train = train.drop('type', axis=1)
le = LabelEncoder()
Y_train = le.fit_transform(train.type.values)
X_test = test
clf = RandomForestClassifier(n_estimators=200)
clf = clf.fit(X_train, Y_train)
indices = np.argsort(clf.feature_importances_)[::-1]
# Print the feature ranking
print('Feature ranking:')
for f in range(X_train.shape[1]):
print('%d. feature %d %s (%f)' % (f + 1, indices[f], X_train.columns[indices[f]],
clf.feature_importances_[indices[f]]))
best_features=X_train.columns[indices[0:7]]
X = X_train[best_features]
Xt = X_test[best_features]
#Splitting data for validation
Xtrain, Xtest, ytrain, ytest = train_test_split(X, Y_train, test_size=0.20, random_state=36)
forest = RandomForestClassifier(max_depth = None,
min_samples_split =5,
min_weight_fraction_leaf = 0.0,
max_leaf_nodes = 60)
parameter_grid = {'n_estimators' : [10, 20, 100, 150],
'criterion' : ['gini', 'entropy'],
'max_features' : ['auto', 'sqrt', 'log2', None]
}
grid_search = GridSearchCV(forest, param_grid=parameter_grid, scoring='accuracy', cv=StratifiedKFold(5))
grid_search.fit(X, Y_train)
print('Best score: {}'.format(grid_search.best_score_))
print('Best parameters: {}'.format(grid_search.best_params_))
forest = RandomForestClassifier(n_estimators = 20,
criterion = 'entropy',
max_features = 'auto')
parameter_grid = {
'max_depth' : [None, 5, 20, 100],
'min_samples_split' : [2, 5, 7],
'min_weight_fraction_leaf' : [0.0, 0.1],
'max_leaf_nodes' : [40, 60, 80],
}
grid_search = GridSearchCV(forest, param_grid=parameter_grid, scoring='accuracy', cv=StratifiedKFold(5))
grid_search.fit(X, Y_train)
print('Best score: {}'.format(grid_search.best_score_))
print('Best parameters: {}'.format(grid_search.best_params_))
#Optimal parameters
clf = RandomForestClassifier(n_estimators=20, n_jobs=-1, criterion = 'gini', max_features = 'sqrt',
min_samples_split=2, min_weight_fraction_leaf=0.0,
max_leaf_nodes=40, max_depth=100)
#Calibration improves probability predictions
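# (method='sigmoid' fits a Platt-style sigmoid on held-out folds, mapping the forest's scores to
# better-calibrated class probabilities.)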
calibrated_clf = CalibratedClassifierCV(clf, method='sigmoid', cv=5)
calibrated_clf.fit(Xtrain, ytrain)
y_val = calibrated_clf.predict_proba(Xtest)
print("Validation accuracy: ", sum(pd.DataFrame(y_val, columns=le.classes_).idxmax(axis=1).values
== le.inverse_transform(ytest))/len(ytest))
svc = svm.SVC(kernel='linear')
svc.fit(Xtrain, ytrain)
y_val_s = svc.predict(Xtest)
print("Validation accuracy: ", sum(le.inverse_transform(y_val_s)
== le.inverse_transform(ytest))/len(ytest))
#The last model is logistic regression
logreg = LogisticRegression()
parameter_grid = {'solver' : ['newton-cg', 'lbfgs'],
'multi_class' : ['ovr', 'multinomial'],
'C' : [0.005, 0.01, 1, 10, 100, 1000],
'tol': [0.0001, 0.001, 0.005]
}
grid_search = GridSearchCV(logreg, param_grid=parameter_grid, cv=StratifiedKFold(5))
grid_search.fit(Xtrain, ytrain)
print('Best score: {}'.format(grid_search.best_score_))
print('Best parameters: {}'.format(grid_search.best_params_))
log_reg = LogisticRegression(C = 1, tol = 0.0001, solver='newton-cg', multi_class='multinomial')
log_reg.fit(Xtrain, ytrain)
y_val_l = log_reg.predict_proba(Xtest)
print("Validation accuracy: ", sum(pd.DataFrame(y_val_l, columns=le.classes_).idxmax(axis=1).values
== le.inverse_transform(ytest))/len(ytest))
svc = svm.SVC(kernel='linear')
svc.fit(X, Y_train)
svc_pred = svc.predict(Xt)
clf = RandomForestClassifier(n_estimators=20, n_jobs=-1, criterion = 'gini', max_features = 'sqrt',
min_samples_split=2, min_weight_fraction_leaf=0.0,
max_leaf_nodes=40, max_depth=100)
calibrated_clf = CalibratedClassifierCV(clf, method='sigmoid', cv=5)
calibrated_clf.fit(X, Y_train)
for_pred = calibrated_clf.predict_proba(Xt)
log_reg.fit(X, Y_train)
log_pred = log_reg.predict_proba(Xt)
#I decided to try adding xgboost.
params = {"objective": "multi:softprob", "num_class": 3, 'eta': 0.01, 'min_child_weight' : 10, 'max_depth': 5}
param = list(params.items())
gbm = xgb.train(params, xgb.DMatrix(X, Y_train), 300)
x_pred = gbm.predict(xgb.DMatrix(Xt))
#Predicted values
s = le.inverse_transform(svc_pred)
l = pd.DataFrame(log_pred, columns=le.classes_).idxmax(axis=1).values
f = pd.DataFrame(for_pred, columns=le.classes_).idxmax(axis=1).values
x = pd.DataFrame(x_pred, columns=le.classes_).idxmax(axis=1).values
#Average of models, which give probability predictions.
q = pd.DataFrame(((log_pred + for_pred + x_pred)/3), columns=le.classes_).idxmax(axis=1).values
#As LR and SVC gave the best results, I compare them
for i in range(len(s)):
if l[i] != s[i]:
print(i, l[i], s[i], f[i], x[i], q[i])
from collections import Counter
for i in range(len(s)):
type_list = [l[i], s[i], f[i], x[i]]
c = Counter(type_list)
if l[i] != c.most_common()[0][0]:
print(i, l[i], s[i], f[i], x[i], q[i], 'Most common: ' + c.most_common()[0][0])
#I tried several ways and here is the current version:
l[3] = 'Goblin'
l[44] = 'Ghost'
l[98] = 'Ghoul'
l[107] = 'Goblin'
l[112] = 'Ghost'
l[134] = 'Goblin'
l[162] = 'Ghoul'
l[173] = 'Goblin'
l[263] = 'Goblin'
l[309] = 'Goblin'
l[441] = 'Goblin'
l[445] = 'Ghost'
submission = | pd.DataFrame({'id':test_id, 'type':l}) | pandas.DataFrame |
import pandas as pd
import torch
from collections import defaultdict
import cosypose.utils.tensor_collection as tc
from cosypose.lib3d.transform_ops import invert_T
def parse_obs_data(obs):
data = defaultdict(list)
frame_info = obs['frame_info']
TWC = torch.as_tensor(obs['camera']['TWC']).float()
for n, obj in enumerate(obs['objects']):
info = dict(frame_obj_id=n,
label=obj['name'],
visib_fract=obj.get('visib_fract', 1),
scene_id=frame_info['scene_id'],
view_id=frame_info['view_id'])
data['infos'].append(info)
data['TWO'].append(obj['TWO'])
data['bboxes'].append(obj['bbox'])
for k, v in data.items():
if k != 'infos':
            data[k] = torch.stack([torch.as_tensor(x).float() for x in v])
data['infos'] = | pd.DataFrame(data['infos']) | pandas.DataFrame |
"""
This is the gradient boosting model using XGBoost for "TalkingData AdTracking Fraud Detection Challenge".
The model is revised from "https://www.kaggle.com/pranav84/lightgbm-fixing-unbalanced-data-lb-0-9680".
Features used:
ip: ip address of click.
app: app id for marketing.
device: device type id of user mobile phone (e.g., iphone 6 plus, iphone 7, huawei mate 7, etc.)
os: os version id of user mobile phone
channel: channel id of mobile ad publisher
click_time: timestamp of click (UTC)
day: day of click, derived from click_time
hour: hour of click, derived from click_time
dayofweek: day of week of click, derived from click_time
clicks_by_ip: number of clicks per IP
Target:
is_attributed: target that is to be predicted, indicating the app was downloaded
The evaluation is based on AUC-ROC score.
This model achieves a score of 0.9648 on the test set.
"""
import pandas as pd
import numpy as np
import xgboost as xgb
import os
import gc
import time
import datetime
# Debug flag
debug = False
# Parameters for XGBoost model
params = {'eta': 0.6,
'tree_method': "hist",
'grow_policy': "lossguide",
'max_leaves': 1400,
'max_depth': 0,
'subsample': 0.9,
'colsample_bytree': 0.7,
'colsample_bylevel':0.7,
'min_child_weight':0,
'alpha':4,
'objective': 'binary:logistic',
'scale_pos_weight':9,
'eval_metric': 'auc',
'nthread':8,
'random_state': 99,
'silent': True}
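# A note on these settings (my reading, not the original author's): scale_pos_weight=9 compensates
# for the heavy class imbalance (downloads are rare), while tree_method="hist" with
# grow_policy="lossguide" grows trees leaf-wise over histogram bins, trading exactness for speed
# on data of this size.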
# Features used
train_cols = ['ip','app','device','os','channel','click_time','is_attributed']
test_cols = ['ip','app','device','os','channel','click_time','click_id']
dtypes = {
'ip' :'uint32',
'app' :'uint16',
'device' :'uint16',
'os' :'uint16',
'channel' :'uint16',
'is_attributed' :'uint8',
'click_id' :'uint32'
}
if not debug:
train = pd.read_csv("train.csv",usecols=train_cols,dtype=dtypes,skiprows=range(1,124903891),nrows=60000000)
test = pd.read_csv("test.csv",usecols=test_cols,dtype=dtypes)
# use part of training set to ensure validity of model
else:
train = pd.read_csv("train.csv",nrows=1000000,usecols=train_cols,dtype=dtypes)
test = pd.read_csv("test.csv",nrows=100000,usecols=test_cols,dtype=dtypes)
y = train['is_attributed']
train.drop(['is_attributed'],axis=1,inplace=True)
# Output dataframe
out = pd.DataFrame()
out['click_id'] = test['click_id'].astype('int')
test.drop(['click_id'],axis=1,inplace=True)
num_train = train.shape[0]
merge = pd.concat([train,test])
del train,test
gc.collect()
ip_count = merge.groupby(['ip'])['channel'].count().reset_index()
ip_count.columns = ['ip','clicks_by_ip']
merge = merge.merge(ip_count,on='ip',how='left',sort=False)
merge['clicks_by_ip'] = merge['clicks_by_ip'].astype('uint16')
merge['datetime'] = | pd.to_datetime(merge['click_time']) | pandas.to_datetime |
#!/usr/bin/env python
# coding: utf-8
# In[7]:
import pandas as pd
from datetime import datetime
import requests
import re
from urllib.request import urlopen
from lxml import etree
from alphacast import Alphacast
from dotenv import dotenv_values
API_KEY = dotenv_values(".env").get("API_KEY")
alphacast = Alphacast(API_KEY)
# In[8]:
url = "http://www.trabajo.gob.ar/estadisticas/oede/estadisticasnacionales.asp"
response = requests.get(url,verify=False)
html = response.content
htmlparser = etree.HTMLParser()
tree = etree.fromstring(html, htmlparser)
xls_address = tree.xpath("//*[@id='block-system-main']/section/article/div/div[9]/div/div[1]/div/div/ul[1]/li[1]/a/@href")[0]
xls_address
# In[9]:
url = 'http://www.trabajo.gob.ar' + xls_address
r = requests.get(url, allow_redirects=True, verify=False)
# In[10]:
worksheets = {
'C 1': {"prefix": 'Promedio',
"indice_final": 'Variaciones (%)'},
'C 2': {"prefix": 'Mediana',
"indice_final": 'Variaciones (%)'},
'C 3': {"prefix": 'Trabajadores con 5 años de antigüedad o más',
"indice_final": 'Variaciones (%)'},
'C 4': {"prefix": 'Dispersion salarial',
"indice_final": 'Diferencia en puntos porcentuales'}
}
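# Each entry maps an Excel sheet ("C 1".."C 4") to a label prefix (presumably used to name the
# resulting series) and to the row label ("indice_final") that marks the end of that sheet's data
# block; the loop below trims each frame at that row.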
df_merge_1 = pd.DataFrame()
for key in worksheets.keys():
df = pd.read_excel(r.content, skiprows= 4, sheet_name=(key),header=[0,1,2])
df = df.dropna(how='all').dropna(how='all',axis=1)
df = df.iloc[:,0:7]
if isinstance(df.columns, pd.MultiIndex):
df.columns = df.columns.map(' - '.join)
for col in df.columns:
new_col = re.sub('\n', ' ', col).replace("Unnamed: 0_level_0 - Período - Unnamed: 0_level_2", "Date").replace("(*)","")
df = df.rename(columns={col: new_col})
indiceFinal = df[df['Date'] == worksheets[key]["indice_final"]].index[0]
df = df[:indiceFinal-2]
df["Date"] = | pd.to_datetime(df["Date"], errors="coerce") | pandas.to_datetime |
import h3
import pandas as pd
import requests
from time import time
import json
import gencsv # generates hotspots.csv
density={0: {"N": 2, "density_tgt": 10000, "density_max": 10000},
1: {"N": 2, "density_tgt": 10000, "density_max": 10000},
2: {"N": 2, "density_tgt": 10000, "density_max": 10000},
3: {"N": 2, "density_tgt": 10000, "density_max": 10000},
4: {"N": 1, "density_tgt": 250, "density_max": 800},
5: {"N": 1, "density_tgt": 100, "density_max": 400},
6: {"N": 1, "density_tgt": 25, "density_max": 100},
7: {"N": 2, "density_tgt": 5, "density_max": 20},
8: {"N": 2, "density_tgt": 1, "density_max": 4},
9: {"N": 2, "density_tgt": 1, "density_max": 2},
10: {"N": 2, "density_tgt": 1, "density_max": 1}}
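# Assumed reading of the table above (hip17 parameters keyed by H3 resolution): N is the number
# of occupied neighbouring hexes required before a hex's allowed density can scale beyond
# density_tgt, and density_max caps how far it can grow at that resolution.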
class scale_calculator:
'''
    Helium hip17 scale calculator. Takes a snapshot of all hotspots from the Helium API and
    creates hotspots.csv. This class then calculates the hip17 scaling factor based on
    hotspots.csv and the density variable above, so it can be used as a simulator to
    explore the effects of adjusting those variables on the scaling factor.
'''
def __init__(self): # Initializing
try:
self.df=pd.read_csv("hotspots.csv")
except FileNotFoundError:
            print('No hotspots.csv file. Getting latest from API...')
gencsv.download_hotspots()
self.df=pd.read_csv("hotspots.csv")
# clean up the data remove spaces and brackets
self.df.columns = self.df.columns.str.strip().str.lower().str.replace(' ', '_').str.replace('(', '').str.replace(')', '')
print("Total Hotspots: ", len(self.df))
# want to drop the inactive hotspots so they are not used in density calculations
# after 3600 blocks with no activity they are removed
#print('block height minimum',self.df['height'].min())
inactive_threshold=self.df['height'].max()-3600
#print('inactive threshold',inactive_threshold)
self.df_inactive = self.df[self.df.height < inactive_threshold]
self.df = self.df[self.df.height >= inactive_threshold]
self.df.dateadded = | pd.to_datetime(self.df['dateadded'], format='%Y-%m-%d %H:%M:%S.%f') | pandas.to_datetime |
# -*- coding: utf-8 -*-
"""
The good old INRIA pedestrian dataset.
I could not download the archive from [1],
and so downloaded the data from our group cluster.
~/work/vision_data/INRIAPerson
├── Test
│ ├── annotations
│ ├── annotations.lst
│ ├── neg
│ ├── neg.lst
│ ├── pos
│ └── pos.lst
└── Train
├── annotations
├── annotations.lst
├── neg
├── neg.lst
├── pos
└── pos.lst
[1]: http://pascal.inrialpes.fr/data/human/
"""
import os
import pandas as pd
import vislab
dirname = vislab.config['paths']['INRIAPerson']
def parse_annotation(anno_path):
with open(dirname + '/' + anno_path) as f:
lines = f.readlines()
objects = []
for line in lines:
if line.startswith('Image filename'):
filename = line.split(':')[-1].strip()[1:-1]
name = filename.split('/')[-1][:-4]
if line.startswith('Image size'):
width = int(line.split(':')[-1].split('x')[0].strip())
height = int(line.split(':')[-1].split('x')[1].strip())
if line.startswith('# Details for object'):
objects.append({'class': line.split('(')[-1][1:-3]})
if line.startswith('Original label for object'):
objects[-1]['label'] = line.split(':')[-1][2:-2]
if line.startswith('Center point on object'):
objects[-1]['center_x'] = int(
line.split('(')[-1].split(',')[0])
objects[-1]['center_y'] = int(
line.split(')')[-2].split(',')[-1].strip())
if line.startswith('Bounding box for object'):
_ = line.split(' : ')[-1]
objects[-1]['xmin'] = int(_.split(' - ')[0].split(',')[0][1:])
objects[-1]['ymin'] = int(_.split(' - ')[0].split(',')[1][1:-1])
objects[-1]['xmax'] = int(_.split(' - ')[1].split(',')[0][1:])
objects[-1]['ymax'] = int(_.split(' - ')[1].split(',')[1][1:-2])
objects_df = pd.DataFrame(objects)
objects_df['width'] = width
objects_df['height'] = height
objects_df['filename'] = filename
objects_df.index = pd.MultiIndex.from_tuples(
[(name, ind) for ind in objects_df.index],
names=['image_id', 'object_id']
)
return objects_df
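# Hedged sketch of the PASCAL-style annotation lines parse_annotation expects (illustrative
# values only):
#
#   Image filename : "Train/pos/crop001001.png"
#   Image size (X x Y x C) : 640 x 480 x 3
#   # Details for object 1 ("PASperson")
#   Original label for object 1 "UprightPerson" : "UprightPerson"
#   Center point on object 1 "UprightPerson" (X, Y) : (204, 199)
#   Bounding box for object 1 "UprightPerson" (Xmin, Ymin) - (Xmax, Ymax) : (154, 102) - (255, 380)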
def load_dataset(force=False):
cache_filename = vislab.config['paths']['shared_data'] + '/inria_dfs.h5'
if not force and os.path.exists(cache_filename):
images_df = pd.read_hdf(cache_filename, 'images_df')
objects_df = pd.read_hdf(cache_filename, 'objects_df')
return images_df, objects_df
objects_dfs = []
images_dfs = []
for split in ['Train', 'Test']:
# Load object data.
anno_filenames = [
_.strip() for _
in open('{}/{}/annotations.lst'.format(dirname, split)).readlines()
]
objects_df = pd.concat((
parse_annotation(anno_filename)
for anno_filename in anno_filenames
))
# Construct images_df from the objects data.
grouped = objects_df.groupby(level=0)
images_df = pd.DataFrame()
images_df['filename'] = objects_df.groupby(level=0).first()['filename']
images_df[['filename', 'width', 'height']] = grouped.first()[
['filename', 'width', 'height']]
# We know that all objects are PASperson, but let's count them.
images_df['PASperson'] = True
images_df['num_objects'] = grouped.count()['class']
# Load negative examples and append to the images_df.
neg_filenames, neg_image_ids = map(list, zip(*[
(_.strip(), _.strip().split('/')[-1][:-4]) for _
in open('{}/{}/neg.lst'.format(dirname, split)).readlines()
]))
neg_images_df = pd.DataFrame(index=neg_image_ids)
neg_images_df['filename'] = neg_filenames
neg_images_df['PASperson'] = False
neg_images_df['num_objects'] = 0
images_df = images_df.append(neg_images_df)
objects_df['split'] = split
images_df['split'] = split
objects_dfs.append(objects_df)
images_dfs.append(images_df)
objects_df = | pd.concat(objects_dfs) | pandas.concat |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# Deep Recurrent Reinforcement Learning: 1 LSTM layer and 4 Dense layers, tanh activation function, 12 episodes, 50 iterations
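# The arrays below come in pairs per iteration: drnnLSTMtanhMakespanN holds the 12 per-episode
# makespans for iteration N, and drnnLSTMtanhRewardsN holds the corresponding (negative) rewards.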
drnnLSTMtanhMakespan0=[799, 798, 799, 799, 805, 806, 799, 805, 805, 800, 798, 798]
drnnLSTMtanhMakespan1=[800, 798, 796, 800, 796, 794, 795, 798, 800, 798, 805, 798]
drnnLSTMtanhMakespan2=[796, 800, 798, 804, 800, 798, 798, 798, 800, 800, 802, 797]
drnnLSTMtanhMakespan3=[805, 800, 800, 803, 794, 802, 800, 798, 799, 804, 799, 806]
drnnLSTMtanhMakespan4=[796, 798, 795, 798, 796, 799, 800, 796, 796, 798, 806, 800]
drnnLSTMtanhMakespan5=[798, 798, 799, 800, 800, 808, 798, 798, 801, 796, 799, 798]
drnnLSTMtanhMakespan6=[800, 796, 805, 798, 798, 796, 799, 800, 803, 800, 798, 800]
drnnLSTMtanhMakespan7=[799, 805, 802, 805, 800, 799, 800, 799, 805, 800, 794, 796]
drnnLSTMtanhMakespan8=[799, 798, 800, 798, 798, 800, 800, 800, 804, 799, 800, 804]
drnnLSTMtanhMakespan9=[795, 800, 795, 796, 798, 796, 797, 800, 797, 798, 796, 795]
drnnLSTMtanhMakespan10=[804, 799, 805, 798, 798, 798, 805, 800, 796, 804, 796, 799]
drnnLSTMtanhMakespan11=[795, 803, 805, 798, 795, 801, 798, 798, 804, 803, 799, 804]
drnnLSTMtanhMakespan12=[798, 798, 799, 800, 798, 798, 799, 799, 801, 796, 799, 798]
drnnLSTMtanhMakespan13=[798, 798, 799, 797, 796, 796, 800, 797, 805, 800, 800, 794]
drnnLSTMtanhMakespan14=[800, 798, 798, 796, 800, 800, 798, 798, 802, 798, 802, 798]
drnnLSTMtanhMakespan15=[796, 796, 800, 801, 800, 800, 796, 794, 796, 800, 796, 798]
drnnLSTMtanhMakespan16=[798, 798, 795, 797, 795, 799, 800, 796, 795, 796, 800, 800]
drnnLSTMtanhMakespan17=[794, 795, 800, 798, 795, 796, 798, 796, 795, 794, 798, 796]
drnnLSTMtanhMakespan18=[797, 795, 794, 794, 800, 796, 796, 795, 798, 795, 798, 794]
drnnLSTMtanhMakespan19=[797, 795, 795, 796, 798, 799, 795, 799, 795, 794, 795, 795]
drnnLSTMtanhMakespan20=[796, 794, 798, 797, 798, 799, 795, 795, 797, 795, 795, 792]
drnnLSTMtanhMakespan21=[797, 795, 797, 793, 794, 794, 800, 794, 798, 795, 797, 795]
drnnLSTMtanhMakespan22=[794, 800, 798, 795, 795, 796, 796, 799, 795, 794, 795, 795]
drnnLSTMtanhMakespan23=[795, 795, 794, 795, 794, 794, 797, 799, 796, 794, 794, 795]
drnnLSTMtanhMakespan24=[798, 795, 795, 795, 792, 794, 795, 794, 794, 795, 795, 795]
drnnLSTMtanhMakespan25=[794, 792, 794, 795, 795, 794, 794, 794, 794, 795, 794, 793]
drnnLSTMtanhMakespan26=[794, 794, 795, 796, 798, 795, 794, 794, 794, 794, 795, 794]
drnnLSTMtanhMakespan27=[795, 794, 795, 795, 795, 794, 794, 794, 794, 794, 795, 795]
drnnLSTMtanhMakespan28=[795, 794, 794, 795, 794, 795, 795, 795, 795, 794, 795, 794]
drnnLSTMtanhMakespan29=[792, 794, 795, 794, 794, 795, 794, 793, 795, 794, 795, 792]
drnnLSTMtanhMakespan30=[795, 794, 795, 795, 794, 794, 794, 795, 794, 794, 794, 794]
drnnLSTMtanhMakespan31=[794, 794, 795, 794, 795, 793, 795, 795, 795, 792, 794, 794]
drnnLSTMtanhMakespan32=[795, 795, 794, 793, 795, 795, 795, 795, 794, 794, 795, 794]
drnnLSTMtanhMakespan33=[793, 794, 795, 793, 792, 795, 794, 794, 794, 794, 794, 795]
drnnLSTMtanhMakespan34=[794, 795, 795, 794, 794, 794, 794, 793, 794, 794, 794, 794]
drnnLSTMtanhMakespan35=[794, 794, 797, 793, 792, 794, 793, 794, 795, 794, 795, 792]
drnnLSTMtanhMakespan36=[794, 794, 793, 794, 795, 797, 795, 795, 794, 795, 793, 794]
drnnLSTMtanhMakespan37=[795, 793, 795, 794, 795, 798, 795, 794, 795, 793, 795, 794]
drnnLSTMtanhMakespan38=[794, 795, 793, 795, 794, 794, 794, 794, 794, 794, 797, 795]
drnnLSTMtanhMakespan39=[794, 794, 795, 794, 795, 795, 794, 795, 794, 795, 798, 797]
drnnLSTMtanhMakespan40=[795, 795, 794, 795, 794, 795, 795, 794, 794, 794, 795, 795]
drnnLSTMtanhMakespan41=[794, 795, 792, 794, 794, 798, 795, 794, 794, 794, 793, 795]
drnnLSTMtanhMakespan42=[793, 795, 794, 793, 794, 794, 792, 794, 795, 794, 794, 793]
drnnLSTMtanhMakespan43=[793, 792, 793, 794, 794, 795, 792, 794, 795, 794, 795, 794]
drnnLSTMtanhMakespan44=[793, 794, 795, 795, 794, 794, 795, 798, 794, 792, 795, 794]
drnnLSTMtanhMakespan45=[795, 794, 794, 794, 794, 792, 794, 795, 794, 796, 795, 794]
drnnLSTMtanhMakespan46=[794, 793, 793, 795, 795, 794, 794, 794, 794, 796, 794, 794]
drnnLSTMtanhMakespan47=[794, 794, 795, 794, 794, 795, 792, 795, 794, 795, 795, 794]
drnnLSTMtanhMakespan48=[794, 795, 794, 794, 794, 792, 794, 795, 796, 794, 794, 795]
drnnLSTMtanhMakespan49=[794, 794, 794, 794, 794, 794, 792, 794, 793, 794, 795, 794]
drnnLSTMtanhRewards0=[-0.1759911894273128, -0.17580964970257765, -0.1759911894273128, -0.1759911894273128, -0.177078750549934, -0.17725973169122497, -0.1759911894273128, -0.177078750549934, -0.177078750549934, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765]
drnnLSTMtanhRewards1=[-0.17617264919621228, -0.17580964970257765, -0.17544633017412387, -0.17617264919621228, -0.17544633017412387, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.17580964970257765, -0.17580964970257765]
drnnLSTMtanhRewards2=[-0.17544633017412387, -0.17617264919621228, -0.17580964970257765, -0.1768976897689769, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17653532907770195, -0.17562802996914942]
drnnLSTMtanhRewards3=[-0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.17671654929577466, -0.17508269018743108, -0.17653532907770195, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.1768976897689769, -0.1759911894273128, -0.17725973169122497]
drnnLSTMtanhRewards4=[-0.17580964970257765, -0.17544633017412387, -0.17526455026455026, -0.17580964970257765, -0.17544633017412387, -0.1759911894273128, -0.17617264919621228, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17725973169122497, -0.17617264919621228]
drnnLSTMtanhRewards5=[-0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17617264919621228, -0.17617264919621228, -0.1776214552648934, -0.17580964970257765, -0.17580964970257765, -0.1763540290620872, -0.17544633017412387, -0.1759911894273128, -0.17580964970257765]
drnnLSTMtanhRewards6=[-0.17617264919621228, -0.17544633017412387, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.1759911894273128, -0.17617264919621228, -0.17671654929577466, -0.17617264919621228, -0.17580964970257765, -0.17617264919621228]
drnnLSTMtanhRewards7=[-0.1759911894273128, -0.177078750549934, -0.17653532907770195, -0.177078750549934, -0.17617264919621228, -0.1759911894273128, -0.17617264919621228, -0.1759911894273128, -0.177078750549934, -0.17617264919621228, -0.17508269018743108, -0.17544633017412387]
drnnLSTMtanhRewards8=[-0.1759911894273128, -0.17580964970257765, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17617264919621228, -0.1768976897689769, -0.1759911894273128, -0.17617264919621228, -0.1768976897689769]
drnnLSTMtanhRewards9=[-0.17526455026455026, -0.17617264919621228, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17544633017412387, -0.17562802996914942, -0.17617264919621228, -0.17562802996914942, -0.17580964970257765, -0.17544633017412387, -0.17526455026455026]
drnnLSTMtanhRewards10=[-0.1768976897689769, -0.1759911894273128, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.17617264919621228, -0.17544633017412387, -0.1768976897689769, -0.17544633017412387, -0.1759911894273128]
drnnLSTMtanhRewards11=[-0.17526455026455026, -0.17671654929577466, -0.177078750549934, -0.17580964970257765, -0.17526455026455026, -0.1763540290620872, -0.17580964970257765, -0.17580964970257765, -0.1768976897689769, -0.17671654929577466, -0.1759911894273128, -0.1768976897689769]
drnnLSTMtanhRewards12=[-0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.1759911894273128, -0.1763540290620872, -0.17544633017412387, -0.1759911894273128, -0.17580964970257765]
drnnLSTMtanhRewards13=[-0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17562802996914942, -0.17544633017412387, -0.17544633017412387, -0.17617264919621228, -0.17562802996914942, -0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.17508269018743108]
drnnLSTMtanhRewards14=[-0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17653532907770195, -0.17580964970257765, -0.17653532907770195, -0.17580964970257765]
drnnLSTMtanhRewards15=[-0.17544633017412387, -0.17544633017412387, -0.17617264919621228, -0.1763540290620872, -0.17617264919621228, -0.17617264919621228, -0.17544633017412387, -0.17508269018743108, -0.17544633017412387, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765]
drnnLSTMtanhRewards16=[-0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.1759911894273128, -0.17617264919621228, -0.17544633017412387, -0.17544633017412387, -0.17526455026455026, -0.17617264919621228, -0.17617264919621228]
drnnLSTMtanhRewards17=[-0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17617264919621228, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17544633017412387]
drnnLSTMtanhRewards18=[-0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17617264919621228, -0.17544633017412387, -0.17544633017412387, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.17508269018743108]
drnnLSTMtanhRewards19=[-0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.1759911894273128, -0.17526455026455026, -0.1759911894273128, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026]
drnnLSTMtanhRewards20=[-0.17544633017412387, -0.17508269018743108, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765, -0.1759911894273128, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17471872931833224]
drnnLSTMtanhRewards21=[-0.17562802996914942, -0.17526455026455026, -0.17562802996914942, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17617264919621228, -0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026]
drnnLSTMtanhRewards22=[-0.17508269018743108, -0.17617264919621228, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17544633017412387, -0.1759911894273128, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026]
drnnLSTMtanhRewards23=[-0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17562802996914942, -0.1759911894273128, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026]
drnnLSTMtanhRewards24=[-0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026]
drnnLSTMtanhRewards25=[-0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.1749007498897221]
drnnLSTMtanhRewards26=[-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drnnLSTMtanhRewards27=[-0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026]
drnnLSTMtanhRewards28=[-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drnnLSTMtanhRewards29=[-0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224]
drnnLSTMtanhRewards30=[-0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drnnLSTMtanhRewards31=[-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108]
drnnLSTMtanhRewards32=[-0.17526455026455026, -0.17526455026455026, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drnnLSTMtanhRewards33=[-0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026]
drnnLSTMtanhRewards34=[-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drnnLSTMtanhRewards35=[-0.17508269018743108, -0.17508269018743108, -0.17562802996914942, -0.17471872931833224, -0.1749007498897221, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224]
drnnLSTMtanhRewards36=[-0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17508269018743108]
drnnLSTMtanhRewards37=[-0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108]
drnnLSTMtanhRewards38=[-0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026]
drnnLSTMtanhRewards39=[-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17562802996914942]
drnnLSTMtanhRewards40=[-0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026]
drnnLSTMtanhRewards41=[-0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17526455026455026]
drnnLSTMtanhRewards42=[-0.1749007498897221, -0.17526455026455026, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221]
drnnLSTMtanhRewards43=[-0.1749007498897221, -0.17471872931833224, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drnnLSTMtanhRewards44=[-0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108]
drnnLSTMtanhRewards45=[-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108]
drnnLSTMtanhRewards46=[-0.17508269018743108, -0.1749007498897221, -0.1749007498897221, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108]
drnnLSTMtanhRewards47=[-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108]
drnnLSTMtanhRewards48=[-0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026]
drnnLSTMtanhRewards49=[-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
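# --- Illustrative helper (not part of the original experiments) ---
# A minimal, hedged sketch: summarize one per-iteration reward list with basic statistics.
# It only uses lists already defined above; the function name summarize_rewards is
# introduced here purely for illustration.
import statistics

def summarize_rewards(rewards):
    """Return (mean, min, max) of a single iteration's reward list."""
    return statistics.mean(rewards), min(rewards), max(rewards)

# Example: summary of the last recorded LSTM/tanh iteration.
print("LSTM-tanh iteration 49 rewards (mean, min, max):", summarize_rewards(drnnLSTMtanhRewards49))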
# Deep Recurrent Reinforcement Learning: 1 LSTM layer and 4 Dense layers, ReLU activation function, 12 episodes, 50 iterations
drnnLSTMreluMakespan0=[805, 800, 800, 800, 794, 800, 798, 809, 795, 800, 798, 798]
drnnLSTMreluMakespan1=[798, 798, 796, 799, 800, 796, 796, 798, 798, 794, 798, 800]
drnnLSTMreluMakespan2=[805, 805, 798, 799, 806, 799, 806, 799, 800, 798, 805, 795]
drnnLSTMreluMakespan3=[800, 800, 800, 796, 800, 800, 799, 806, 808, 798, 797, 798]
drnnLSTMreluMakespan4=[805, 805, 795, 796, 799, 804, 798, 794, 798, 794, 796, 810]
drnnLSTMreluMakespan5=[798, 798, 798, 795, 800, 798, 796, 802, 800, 800, 805, 801]
drnnLSTMreluMakespan6=[800, 798, 798, 795, 800, 796, 800, 798, 799, 796, 805, 800]
drnnLSTMreluMakespan7=[800, 800, 800, 799, 798, 798, 800, 805, 800, 799, 800, 801]
drnnLSTMreluMakespan8=[799, 800, 800, 799, 795, 795, 805, 795, 798, 800, 798, 800]
drnnLSTMreluMakespan9=[800, 796, 805, 798, 798, 795, 805, 800, 799, 795, 800, 805]
drnnLSTMreluMakespan10=[805, 798, 805, 800, 801, 805, 799, 805, 798, 800, 800, 798]
drnnLSTMreluMakespan11=[798, 803, 800, 797, 795, 796, 794, 799, 800, 800, 800, 796]
drnnLSTMreluMakespan12=[799, 798, 799, 795, 798, 795, 798, 798, 798, 795, 798, 798]
drnnLSTMreluMakespan13=[798, 798, 799, 796, 798, 796, 800, 799, 796, 794, 796, 795]
drnnLSTMreluMakespan14=[796, 798, 806, 799, 804, 798, 805, 798, 800, 805, 794, 800]
drnnLSTMreluMakespan15=[806, 795, 800, 796, 798, 796, 810, 798, 799, 798, 800, 800]
drnnLSTMreluMakespan16=[799, 796, 798, 798, 798, 800, 798, 810, 796, 805, 800, 795]
drnnLSTMreluMakespan17=[798, 798, 798, 794, 798, 805, 801, 798, 800, 799, 798, 798]
drnnLSTMreluMakespan18=[795, 800, 794, 798, 797, 798, 794, 800, 797, 796, 794, 794]
drnnLSTMreluMakespan19=[798, 802, 794, 798, 799, 795, 797, 795, 800, 796, 797, 796]
drnnLSTMreluMakespan20=[794, 797, 795, 794, 799, 795, 795, 795, 800, 797, 794, 798]
drnnLSTMreluMakespan21=[799, 798, 796, 795, 794, 798, 795, 795, 798, 798, 795, 794]
drnnLSTMreluMakespan22=[794, 794, 795, 797, 795, 795, 795, 792, 794, 795, 794, 794]
drnnLSTMreluMakespan23=[794, 794, 794, 794, 795, 796, 793, 794, 795, 794, 797, 795]
drnnLSTMreluMakespan24=[794, 792, 792, 794, 796, 792, 794, 795, 794, 792, 796, 795]
drnnLSTMreluMakespan25=[794, 795, 795, 794, 794, 792, 795, 792, 795, 794, 794, 794]
drnnLSTMreluMakespan26=[795, 794, 794, 795, 794, 794, 793, 794, 797, 795, 794, 795]
drnnLSTMreluMakespan27=[794, 794, 795, 796, 795, 797, 794, 794, 795, 801, 794, 795]
drnnLSTMreluMakespan28=[795, 795, 795, 795, 794, 792, 794, 797, 794, 795, 795, 795]
drnnLSTMreluMakespan29=[794, 792, 798, 794, 797, 795, 793, 795, 795, 794, 795, 795]
drnnLSTMreluMakespan30=[795, 794, 798, 794, 794, 795, 792, 796, 794, 796, 794, 794]
drnnLSTMreluMakespan31=[794, 795, 795, 794, 795, 794, 795, 795, 794, 794, 795, 795]
drnnLSTMreluMakespan32=[798, 794, 794, 794, 798, 792, 795, 795, 795, 796, 794, 795]
drnnLSTMreluMakespan33=[794, 796, 794, 794, 794, 795, 794, 794, 797, 793, 793, 795]
drnnLSTMreluMakespan34=[794, 794, 795, 794, 794, 793, 794, 795, 793, 795, 795, 794]
drnnLSTMreluMakespan35=[798, 796, 795, 794, 795, 795, 795, 795, 794, 795, 797, 795]
drnnLSTMreluMakespan36=[794, 796, 794, 794, 794, 794, 795, 795, 797, 796, 795, 795]
drnnLSTMreluMakespan37=[795, 794, 796, 795, 795, 795, 795, 794, 792, 797, 794, 793]
drnnLSTMreluMakespan38=[794, 798, 794, 792, 794, 792, 795, 797, 793, 794, 794, 797]
drnnLSTMreluMakespan39=[792, 794, 794, 794, 792, 795, 795, 795, 794, 794, 795, 794]
drnnLSTMreluMakespan40=[792, 795, 795, 792, 795, 795, 794, 795, 794, 795, 794, 795]
drnnLSTMreluMakespan41=[794, 797, 795, 794, 795, 795, 798, 794, 795, 796, 796, 794]
drnnLSTMreluMakespan42=[794, 795, 795, 795, 794, 795, 795, 794, 794, 795, 793, 795]
drnnLSTMreluMakespan43=[795, 794, 795, 794, 795, 795, 792, 794, 794, 795, 794, 795]
drnnLSTMreluMakespan44=[795, 794, 792, 795, 794, 794, 795, 794, 796, 795, 796, 794]
drnnLSTMreluMakespan45=[795, 794, 793, 794, 793, 795, 794, 794, 795, 794, 795, 794]
drnnLSTMreluMakespan46=[794, 796, 793, 794, 794, 795, 799, 795, 794, 794, 794, 794]
drnnLSTMreluMakespan47=[794, 794, 794, 794, 795, 793, 795, 795, 794, 795, 795, 795]
drnnLSTMreluMakespan48=[794, 794, 795, 794, 795, 795, 795, 794, 794, 795, 795, 794]
drnnLSTMreluMakespan49=[795, 795, 795, 794, 795, 795, 794, 795, 793, 793, 792, 792]
drnnLSTMreluRewards0=[-0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.17617264919621228, -0.17508269018743108, -0.17617264919621228, -0.17580964970257765, -0.1778021978021978, -0.17526455026455026, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765]
drnnLSTMreluRewards1=[-0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.1759911894273128, -0.17617264919621228, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.17508269018743108, -0.17580964970257765, -0.17617264919621228]
drnnLSTMreluRewards2=[-0.177078750549934, -0.177078750549934, -0.17580964970257765, -0.1759911894273128, -0.17725973169122497, -0.1759911894273128, -0.17725973169122497, -0.1759911894273128, -0.17617264919621228, -0.17580964970257765, -0.177078750549934, -0.17526455026455026]
drnnLSTMreluRewards3=[-0.17617264919621228, -0.17617264919621228, -0.17617264919621228, -0.17544633017412387, -0.17617264919621228, -0.17617264919621228, -0.1759911894273128, -0.17725973169122497, -0.1776214552648934, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765]
drnnLSTMreluRewards4=[-0.177078750549934, -0.177078750549934, -0.17526455026455026, -0.17544633017412387, -0.1759911894273128, -0.1768976897689769, -0.17580964970257765, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17544633017412387, -0.17798286090969018]
drnnLSTMreluRewards5=[-0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.17617264919621228, -0.17580964970257765, -0.17544633017412387, -0.17653532907770195, -0.17617264919621228, -0.17617264919621228, -0.177078750549934, -0.1763540290620872]
drnnLSTMreluRewards6=[-0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.17617264919621228, -0.17544633017412387, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.17544633017412387, -0.177078750549934, -0.17617264919621228]
drnnLSTMreluRewards7=[-0.17617264919621228, -0.17617264919621228, -0.17617264919621228, -0.1759911894273128, -0.17580964970257765, -0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.17617264919621228, -0.1759911894273128, -0.17617264919621228, -0.1763540290620872]
drnnLSTMreluRewards8=[-0.1759911894273128, -0.17617264919621228, -0.17617264919621228, -0.1759911894273128, -0.17526455026455026, -0.17526455026455026, -0.177078750549934, -0.17526455026455026, -0.17580964970257765, -0.17617264919621228, -0.17580964970257765, -0.17617264919621228]
drnnLSTMreluRewards9=[-0.17617264919621228, -0.17544633017412387, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.17526455026455026, -0.17617264919621228, -0.1759911894273128, -0.17526455026455026, -0.17617264919621228, -0.177078750549934]
drnnLSTMreluRewards10=[-0.177078750549934, -0.17580964970257765, -0.177078750549934, -0.17617264919621228, -0.1763540290620872, -0.1759911894273128, -0.177078750549934, -0.177078750549934, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765]
drnnLSTMreluRewards11=[-0.17580964970257765, -0.17671654929577466, -0.17617264919621228, -0.17562802996914942, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108, -0.1759911894273128, -0.17617264919621228, -0.17617264919621228, -0.17617264919621228, -0.17544633017412387]
drnnLSTMreluRewards12=[-0.1759911894273128, -0.17580964970257765, -0.1759911894273128, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.17580964970257765]
drnnLSTMreluRewards13=[-0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17544633017412387, -0.17580964970257765, -0.17544633017412387, -0.17617264919621228, -0.1759911894273128, -0.17544633017412387, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026]
drnnLSTMreluRewards14=[-0.17544633017412387, -0.17580964970257765, -0.17725973169122497, -0.1759911894273128, -0.1768976897689769, -0.17580964970257765, -0.177078750549934, -0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.17508269018743108, -0.17617264919621228]
drnnLSTMreluRewards15=[-0.17725973169122497, -0.17526455026455026, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765, -0.17544633017412387, -0.17798286090969018, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228]
drnnLSTMreluRewards16=[-0.1759911894273128, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17617264919621228, -0.17580964970257765, -0.17798286090969018, -0.17544633017412387, -0.177078750549934, -0.17617264919621228, -0.17526455026455026]
drnnLSTMreluRewards17=[-0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17508269018743108, -0.17580964970257765, -0.177078750549934, -0.1763540290620872, -0.17580964970257765, -0.17617264919621228, -0.1759911894273128, -0.17580964970257765, -0.17580964970257765]
drnnLSTMreluRewards18=[-0.17526455026455026, -0.17617264919621228, -0.17508269018743108, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765, -0.17508269018743108, -0.17617264919621228, -0.17562802996914942, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108]
drnnLSTMreluRewards19=[-0.17580964970257765, -0.17653532907770195, -0.17508269018743108, -0.17580964970257765, -0.1759911894273128, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17617264919621228, -0.17544633017412387, -0.17562802996914942, -0.17544633017412387]
drnnLSTMreluRewards20=[-0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.1759911894273128, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17617264919621228, -0.17562802996914942, -0.17508269018743108, -0.17580964970257765]
drnnLSTMreluRewards21=[-0.1759911894273128, -0.17580964970257765, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108]
drnnLSTMreluRewards22=[-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108]
drnnLSTMreluRewards23=[-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026]
drnnLSTMreluRewards24=[-0.17508269018743108, -0.17471872931833224, -0.17471872931833224, -0.17508269018743108, -0.17544633017412387, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17471872931833224, -0.17544633017412387, -0.17526455026455026]
drnnLSTMreluRewards25=[-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drnnLSTMreluRewards26=[-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026]
drnnLSTMreluRewards27=[-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17562802996914942, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.1763540290620872, -0.17508269018743108, -0.17526455026455026]
drnnLSTMreluRewards28=[-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17562802996914942, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026]
drnnLSTMreluRewards29=[-0.17508269018743108, -0.17471872931833224, -0.17580964970257765, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026]
drnnLSTMreluRewards30=[-0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17544633017412387, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108]
drnnLSTMreluRewards31=[-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026]
drnnLSTMreluRewards32=[-0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17471872931833224, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108, -0.17526455026455026]
drnnLSTMreluRewards33=[-0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17562802996914942, -0.1749007498897221, -0.1749007498897221, -0.17526455026455026]
drnnLSTMreluRewards34=[-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108]
drnnLSTMreluRewards35=[-0.17580964970257765, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026]
drnnLSTMreluRewards36=[-0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026]
drnnLSTMreluRewards37=[-0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17471872931833224, -0.17562802996914942, -0.17508269018743108, -0.1749007498897221]
drnnLSTMreluRewards38=[-0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17471872931833224, -0.17526455026455026, -0.17562802996914942, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17562802996914942]
drnnLSTMreluRewards39=[-0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drnnLSTMreluRewards40=[-0.17471872931833224, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026]
drnnLSTMreluRewards41=[-0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.17544633017412387, -0.17508269018743108]
drnnLSTMreluRewards42=[-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026]
drnnLSTMreluRewards43=[-0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026]
drnnLSTMreluRewards44=[-0.17526455026455026, -0.17508269018743108, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108]
drnnLSTMreluRewards45=[-0.17526455026455026, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drnnLSTMreluRewards46=[-0.17508269018743108, -0.17544633017412387, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.1759911894273128, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drnnLSTMreluRewards47=[-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026]
drnnLSTMreluRewards48=[-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108]
drnnLSTMreluRewards49=[-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.1749007498897221, -0.17471872931833224, -0.17471872931833224]
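# --- Illustrative check (not part of the original experiments) ---
# Hedged sketch: the makespan/reward pairs listed above appear consistent with
# reward = -makespan / (makespan + 3741). The constant 3741 is inferred from the listed
# values only and is NOT stated elsewhere in this file; the check below merely verifies
# that inference against one iteration of the LSTM/ReLU data.
def check_reward_mapping(makespans, rewards, offset=3741, tol=1e-9):
    """Return True if every (makespan, reward) pair matches -m / (m + offset)."""
    return all(abs(r + m / (m + offset)) < tol for m, r in zip(makespans, rewards))

print("LSTM-relu iteration 49 matches -m/(m+3741):",
      check_reward_mapping(drnnLSTMreluMakespan49, drnnLSTMreluRewards49))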
# Deep Recurrent Reinforcement Learning: 1 GRU layer and 4 Dense layers, tanh activation function, 12 episodes, 50 iterations
drnnGRUtanhMakespan0 = [798, 799, 798, 804, 805, 799, 801, 801, 801, 799, 798, 796]
drnnGRUtanhMakespan1 = [800, 798, 798, 798, 798, 798, 801, 798, 795, 796, 800, 796]
drnnGRUtanhMakespan2 = [795, 804, 805, 800, 800, 796, 804, 800, 795, 798, 798, 801]
drnnGRUtanhMakespan3 = [806, 796, 794, 797, 798, 800, 800, 808, 805, 798, 800, 809]
drnnGRUtanhMakespan4 = [805, 801, 795, 798, 798, 800, 796, 796, 805, 798, 799, 798]
drnnGRUtanhMakespan5 = [804, 799, 798, 804, 796, 799, 798, 805, 796, 805, 798, 800]
drnnGRUtanhMakespan6 = [800, 799, 794, 801, 799, 796, 800, 804, 797, 796, 800, 798]
drnnGRUtanhMakespan7 = [798, 800, 810, 810, 805, 800, 795, 798, 800, 805, 799, 800]
drnnGRUtanhMakespan8 = [798, 797, 800, 800, 804, 805, 798, 798, 801, 795, 798, 809]
drnnGRUtanhMakespan9 = [803, 800, 800, 805, 805, 798, 804, 803, 805, 801, 810, 801]
drnnGRUtanhMakespan10 = [798, 799, 798, 798, 805, 804, 805, 798, 799, 798, 800, 800]
drnnGRUtanhMakespan11 = [796, 795, 805, 800, 800, 798, 795, 804, 805, 798, 800, 800]
drnnGRUtanhMakespan12 = [799, 799, 809, 800, 799, 799, 797, 805, 799, 800, 798, 795]
drnnGRUtanhMakespan13 = [805, 800, 800, 805, 800, 799, 798, 801, 798, 797, 805, 800]
drnnGRUtanhMakespan14 = [800, 798, 800, 800, 800, 804, 804, 799, 799, 800, 798, 798]
drnnGRUtanhMakespan15 = [805, 800, 795, 800, 804, 795, 800, 798, 799, 798, 800, 796]
drnnGRUtanhMakespan16 = [806, 795, 801, 799, 799, 796, 796, 794, 802, 796, 800, 802]
drnnGRUtanhMakespan17 = [796, 800, 798, 800, 794, 800, 804, 805, 798, 810, 800, 798]
drnnGRUtanhMakespan18 = [798, 800, 794, 794, 797, 798, 800, 805, 798, 798, 804, 798]
drnnGRUtanhMakespan19 = [796, 800, 806, 799, 796, 800, 798, 805, 798, 799, 797, 805]
drnnGRUtanhMakespan20 = [805, 800, 799, 796, 805, 805, 805, 794, 809, 796, 800, 797]
drnnGRUtanhMakespan21 = [798, 800, 800, 800, 798, 801, 796, 801, 801, 801, 795, 799]
drnnGRUtanhMakespan22 = [798, 801, 797, 800, 799, 795, 799, 799, 800, 801, 800, 799]
drnnGRUtanhMakespan23 = [800, 798, 799, 805, 794, 800, 798, 796, 796, 804, 800, 794]
drnnGRUtanhMakespan24 = [800, 800, 798, 805, 804, 799, 798, 801, 800, 798, 798, 798]
drnnGRUtanhMakespan25 = [798, 798, 798, 795, 800, 803, 798, 798, 800, 799, 796, 798]
drnnGRUtanhMakespan26 = [796, 798, 798, 798, 805, 796, 798, 798, 805, 795, 801, 796]
drnnGRUtanhMakespan27 = [794, 796, 796, 800, 800, 798, 800, 798, 802, 798, 797, 798]
drnnGRUtanhMakespan28 = [799, 799, 800, 800, 798, 802, 799, 798, 795, 795, 794, 798]
drnnGRUtanhMakespan29 = [798, 796, 796, 797, 796, 798, 800, 800, 796, 798, 800, 795]
drnnGRUtanhMakespan30 = [799, 798, 795, 795, 800, 795, 798, 798, 799, 798, 805, 799]
drnnGRUtanhMakespan31 = [795, 799, 794, 794, 796, 795, 795, 794, 798, 797, 798, 795]
drnnGRUtanhMakespan32 = [797, 798, 795, 796, 798, 795, 797, 798, 795, 794, 795, 796]
drnnGRUtanhMakespan33 = [799, 795, 794, 794, 798, 795, 798, 797, 800, 796, 795, 794]
drnnGRUtanhMakespan34 = [798, 795, 798, 796, 798, 794, 796, 798, 798, 798, 796, 797]
drnnGRUtanhMakespan35 = [795, 798, 796, 798, 794, 801, 795, 800, 795, 800, 794, 800]
drnnGRUtanhMakespan36 = [798, 799, 796, 797, 795, 794, 800, 795, 795, 794, 795, 795]
drnnGRUtanhMakespan37 = [799, 798, 795, 795, 794, 795, 795, 796, 805, 795, 798, 796]
drnnGRUtanhMakespan38 = [798, 794, 795, 795, 795, 796, 795, 796, 800, 798, 797, 796]
drnnGRUtanhMakespan39 = [794, 795, 795, 797, 795, 795, 794, 794, 798, 795, 794, 798]
drnnGRUtanhMakespan40 = [795, 795, 795, 795, 795, 795, 794, 794, 793, 797, 794, 795]
drnnGRUtanhMakespan41 = [794, 794, 795, 793, 795, 795, 792, 794, 795, 794, 794, 794]
drnnGRUtanhMakespan42 = [795, 795, 795, 796, 794, 797, 795, 795, 792, 795, 796, 793]
drnnGRUtanhMakespan43 = [794, 795, 795, 794, 795, 794, 798, 794, 797, 795, 794, 794]
drnnGRUtanhMakespan44 = [795, 795, 793, 794, 795, 794, 795, 795, 794, 794, 795, 794]
drnnGRUtanhMakespan45 = [794, 794, 794, 794, 794, 794, 795, 794, 794, 794, 796, 795]
drnnGRUtanhMakespan46 = [795, 794, 795, 794, 794, 794, 793, 794, 795, 795, 794, 797]
drnnGRUtanhMakespan47 = [794, 794, 794, 794, 795, 794, 795, 792, 794, 795, 794, 794]
drnnGRUtanhMakespan48 = [795, 794, 794, 794, 795, 798, 794, 794, 794, 795, 794, 794]
drnnGRUtanhMakespan49 = [795, 795, 794, 795, 793, 795, 796, 794, 795, 794, 794, 797]
drnnGRUtanhRewards0 = [-0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.1768976897689769, -0.177078750549934, -0.1759911894273128, -0.1763540290620872, -0.1763540290620872, -0.1763540290620872, -0.1759911894273128, -0.17580964970257765, -0.17544633017412387]
drnnGRUtanhRewards1 = [-0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.1763540290620872, -0.17580964970257765, -0.17526455026455026, -0.17544633017412387, -0.17617264919621228, -0.17544633017412387]
drnnGRUtanhRewards2 = [-0.17526455026455026, -0.1768976897689769, -0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.17544633017412387, -0.1768976897689769, -0.17617264919621228, -0.17526455026455026, -0.17580964970257765, -0.17580964970257765, -0.1763540290620872]
drnnGRUtanhRewards3 = [-0.17725973169122497, -0.17544633017412387, -0.17508269018743108, -0.17562802996914942, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.1776214552648934, -0.177078750549934, -0.17580964970257765, -0.17617264919621228, -0.1778021978021978]
drnnGRUtanhRewards4 = [-0.177078750549934, -0.1763540290620872, -0.17526455026455026, -0.17580964970257765, -0.17580964970257765, -0.17617264919621228, -0.17544633017412387, -0.17544633017412387, -0.177078750549934, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765]
drnnGRUtanhRewards5 = [-0.1768976897689769, -0.1759911894273128, -0.17580964970257765, -0.1768976897689769, -0.17544633017412387, -0.1759911894273128, -0.17580964970257765, -0.177078750549934, -0.17544633017412387, -0.177078750549934, -0.17580964970257765, -0.17617264919621228]
drnnGRUtanhRewards6 = [-0.17617264919621228, -0.1759911894273128, -0.17508269018743108, -0.1763540290620872, -0.1759911894273128, -0.17544633017412387, -0.17617264919621228, -0.1768976897689769, -0.17562802996914942, -0.17544633017412387, -0.17617264919621228, -0.17580964970257765]
drnnGRUtanhRewards7 = [-0.17580964970257765, -0.17617264919621228, -0.17798286090969018, -0.177078750549934, -0.17798286090969018, -0.17617264919621228, -0.17526455026455026, -0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.1759911894273128, -0.17617264919621228]
drnnGRUtanhRewards8 = [-0.17580964970257765, -0.17562802996914942, -0.17617264919621228, -0.17617264919621228, -0.1768976897689769, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.1763540290620872, -0.17580964970257765, -0.1778021978021978]
drnnGRUtanhRewards9 = [-0.17671654929577466, -0.17617264919621228, -0.17617264919621228, -0.177078750549934, -0.177078750549934, -0.17580964970257765, -0.1768976897689769, -0.17671654929577466, -0.177078750549934, -0.1763540290620872, -0.17798286090969018, -0.1763540290620872]
drnnGRUtanhRewards10 = [-0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.1768976897689769, -0.177078750549934, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228]
drnnGRUtanhRewards11 = [-0.17544633017412387, -0.17526455026455026, -0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.17526455026455026, -0.1768976897689769, -0.177078750549934, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228]
drnnGRUtanhRewards12 = [-0.1759911894273128, -0.1759911894273128, -0.1778021978021978, -0.17617264919621228, -0.1759911894273128, -0.1759911894273128, -0.17562802996914942, -0.177078750549934, -0.1759911894273128, -0.17617264919621228, -0.17580964970257765, -0.17526455026455026]
drnnGRUtanhRewards13 = [-0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.177078750549934, -0.17617264919621228, -0.1759911894273128, -0.17580964970257765, -0.1763540290620872, -0.17580964970257765, -0.17562802996914942, -0.177078750549934, -0.17617264919621228]
drnnGRUtanhRewards14 = [-0.17617264919621228, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17617264919621228, -0.1768976897689769, -0.1768976897689769, -0.1759911894273128, -0.1759911894273128, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765]
drnnGRUtanhRewards15 = [-0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.17526455026455026, -0.1768976897689769, -0.17526455026455026, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17617264919621228, -0.17544633017412387]
drnnGRUtanhRewards16 = [-0.17725973169122497, -0.17526455026455026, -0.1763540290620872, -0.1759911894273128, -0.1759911894273128, -0.17544633017412387, -0.17544633017412387, -0.17508269018743108, -0.17653532907770195, -0.17544633017412387, -0.17617264919621228, -0.17653532907770195]
drnnGRUtanhRewards17 = [-0.17544633017412387, -0.17617264919621228, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17508269018743108, -0.1768976897689769, -0.177078750549934, -0.17580964970257765, -0.17798286090969018, -0.17617264919621228, -0.17580964970257765]
drnnGRUtanhRewards18 = [-0.17580964970257765, -0.17617264919621228, -0.17508269018743108, -0.17508269018743108, -0.17562802996914942, -0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.1768976897689769, -0.17580964970257765]
drnnGRUtanhRewards19 = [-0.17544633017412387, -0.17617264919621228, -0.17725973169122497, -0.1759911894273128, -0.17544633017412387, -0.17617264919621228, -0.17580964970257765, -0.177078750549934, -0.17580964970257765, -0.17562802996914942, -0.1759911894273128, -0.177078750549934]
drnnGRUtanhRewards20 = [-0.17617264919621228, -0.177078750549934, -0.1759911894273128, -0.17544633017412387, -0.177078750549934, -0.177078750549934, -0.177078750549934, -0.17508269018743108, -0.1778021978021978, -0.17544633017412387, -0.17617264919621228, -0.17562802996914942]
drnnGRUtanhRewards21 = [-0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.1763540290620872, -0.17544633017412387, -0.1763540290620872, -0.1763540290620872, -0.1763540290620872, -0.17526455026455026, -0.1759911894273128]
drnnGRUtanhRewards22 = [-0.17580964970257765, -0.1763540290620872, -0.17562802996914942, -0.17617264919621228, -0.1759911894273128, -0.17526455026455026, -0.1759911894273128, -0.1759911894273128, -0.17617264919621228, -0.1763540290620872, -0.17617264919621228, -0.1759911894273128]
drnnGRUtanhRewards23 = [-0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.177078750549934, -0.17508269018743108, -0.17617264919621228, -0.17580964970257765, -0.17544633017412387, -0.17544633017412387, -0.1768976897689769, -0.17617264919621228, -0.17508269018743108]
drnnGRUtanhRewards24 = [-0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.177078750549934, -0.1768976897689769, -0.17580964970257765, -0.1763540290620872, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765]
drnnGRUtanhRewards25 = [-0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.17617264919621228, -0.17580964970257765, -0.17671654929577466, -0.17580964970257765, -0.17617264919621228, -0.1759911894273128, -0.17544633017412387, -0.17580964970257765]
drnnGRUtanhRewards26 = [-0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.17526455026455026, -0.1763540290620872, -0.17544633017412387]
drnnGRUtanhRewards27 = [-0.17508269018743108, -0.17544633017412387, -0.17544633017412387, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.17617264919621228, -0.17580964970257765, -0.17653532907770195, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765]
drnnGRUtanhRewards28 = [-0.1759911894273128, -0.1759911894273128, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.17653532907770195, -0.17580964970257765, -0.1759911894273128, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765]
drnnGRUtanhRewards29 = [-0.17580964970257765, -0.17544633017412387, -0.17544633017412387, -0.17562802996914942, -0.17544633017412387, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765, -0.17617264919621228, -0.17526455026455026]
drnnGRUtanhRewards30 = [-0.1759911894273128, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17617264919621228, -0.17526455026455026, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.1759911894273128]
drnnGRUtanhRewards31 = [-0.17526455026455026, -0.1759911894273128, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765, -0.17526455026455026]
drnnGRUtanhRewards32 = [-0.17562802996914942, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026]
drnnGRUtanhRewards33 = [-0.1759911894273128, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17580964970257765, -0.17562802996914942, -0.17617264919621228, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108]
drnnGRUtanhRewards34 = [-0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17544633017412387, -0.17508269018743108, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17562802996914942]
drnnGRUtanhRewards35 = [-0.17526455026455026, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17508269018743108, -0.1763540290620872, -0.17526455026455026, -0.17617264919621228, -0.17526455026455026, -0.17617264919621228, -0.17508269018743108, -0.17617264919621228]
drnnGRUtanhRewards36 = [-0.17580964970257765, -0.1759911894273128, -0.17544633017412387, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17617264919621228, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026]
drnnGRUtanhRewards37 = [-0.1759911894273128, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.177078750549934, -0.17526455026455026, -0.17580964970257765, -0.17544633017412387]
drnnGRUtanhRewards38 = [-0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17544633017412387, -0.17617264919621228, -0.17580964970257765, -0.17562802996914942, -0.17544633017412387]
drnnGRUtanhRewards39 = [-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765]
drnnGRUtanhRewards40 = [-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17562802996914942, -0.17508269018743108, -0.17526455026455026]
drnnGRUtanhRewards41 = [-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drnnGRUtanhRewards42 = [-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17544633017412387, -0.1749007498897221]
drnnGRUtanhRewards43 = [-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108]
drnnGRUtanhRewards44 = [-0.17526455026455026, -0.17526455026455026, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drnnGRUtanhRewards45 = [-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026]
drnnGRUtanhRewards46 = [-0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17562802996914942]
drnnGRUtanhRewards47 = [-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108]
drnnGRUtanhRewards48 = [-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108]
drnnGRUtanhRewards49 = [-0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17562802996914942]
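# --- Illustrative comparison (not part of the original experiments) ---
# Hedged sketch: compare the first and last recorded iterations of the GRU/tanh run to get
# a rough sense of convergence. Only lists defined above are used; the printed means are
# descriptive only, not a claim from the original experiments.
from statistics import mean

print("GRU-tanh mean makespan, iteration 0 :", mean(drnnGRUtanhMakespan0))
print("GRU-tanh mean makespan, iteration 49:", mean(drnnGRUtanhMakespan49))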
# Deep Recurrent Reinforcement Learning: 1 GRU layer and 4 Dense layers, ReLU activation function, 12 episodes, 50 iterations
drnnGRUreluMakespan0 = [800, 799, 798, 797, 798, 800, 800, 796, 800, 794, 800, 800]
drnnGRUreluMakespan1 = [798, 800, 805, 795, 799, 808, 795, 800, 796, 798, 799, 798]
drnnGRUreluMakespan2 = [799, 800, 806, 800, 800, 805, 805, 798, 799, 807, 800, 800]
drnnGRUreluMakespan3 = [798, 795, 799, 800, 800, 796, 798, 800, 800, 804, 805, 800]
drnnGRUreluMakespan4 = [811, 800, 799, 800, 805, 798, 798, 799, 796, 804, 805, 804]
drnnGRUreluMakespan5 = [799, 795, 797, 800, 798, 800, 800, 798, 800, 797, 800, 798]
drnnGRUreluMakespan6 = [798, 800, 798, 799, 797, 798, 800, 796, 801, 799, 795, 798]
drnnGRUreluMakespan7 = [800, 804, 795, 801, 796, 806, 805, 798, 800, 799, 799, 804]
drnnGRUreluMakespan8 = [800, 799, 799, 800, 805, 796, 800, 800, 810, 796, 800, 798]
drnnGRUreluMakespan9 = [794, 800, 799, 805, 800, 800, 798, 798, 796, 795, 798, 796]
drnnGRUreluMakespan10 = [798, 800, 798, 801, 795, 802, 796, 809, 800, 800, 798, 795]
drnnGRUreluMakespan11 = [804, 800, 799, 799, 798, 803, 798, 798, 805, 803, 800, 796]
drnnGRUreluMakespan12 = [800, 799, 805, 797, 798, 796, 799, 794, 799, 805, 799, 800]
drnnGRUreluMakespan13 = [796, 800, 798, 800, 795, 799, 800, 804, 800, 794, 805, 805]
drnnGRUreluMakespan14 = [800, 795, 796, 798, 798, 801, 805, 794, 800, 801, 801, 796]
drnnGRUreluMakespan15 = [798, 800, 796, 796, 798, 794, 797, 800, 796, 801, 795, 799]
drnnGRUreluMakespan16 = [800, 805, 794, 800, 799, 800, 805, 801, 798, 800, 801, 799]
drnnGRUreluMakespan17 = [797, 803, 801, 808, 794, 799, 799, 800, 805, 796, 801, 796]
drnnGRUreluMakespan18 = [805, 800, 800, 804, 799, 798, 800, 799, 804, 796, 800, 804]
drnnGRUreluMakespan19 = [804, 798, 800, 799, 799, 799, 805, 795, 801, 799, 799, 805]
drnnGRUreluMakespan20 = [799, 804, 796, 798, 796, 798, 800, 805, 799, 810, 800, 800]
drnnGRUreluMakespan21 = [798, 799, 799, 805, 798, 798, 805, 798, 794, 799, 798, 798]
drnnGRUreluMakespan22 = [799, 798, 798, 796, 798, 805, 799, 798, 798, 799, 796, 798]
drnnGRUreluMakespan23 = [798, 805, 808, 798, 798, 805, 810, 796, 804, 799, 800, 799]
drnnGRUreluMakespan24 = [798, 796, 798, 795, 800, 798, 799, 798, 797, 805, 798, 800]
drnnGRUreluMakespan25 = [799, 796, 799, 798, 805, 798, 798, 800, 796, 794, 810, 798]
drnnGRUreluMakespan26 = [799, 798, 805, 800, 802, 798, 799, 799, 799, 794, 802, 797]
drnnGRUreluMakespan27 = [798, 800, 805, 796, 798, 795, 802, 796, 798, 800, 798, 794]
drnnGRUreluMakespan28 = [796, 805, 798, 800, 800, 798, 810, 798, 798, 798, 796, 796]
drnnGRUreluMakespan29 = [800, 798, 798, 802, 794, 798, 796, 808, 800, 800, 798, 799]
drnnGRUreluMakespan30 = [798, 796, 798, 798, 794, 798, 794, 800, 796, 794, 800, 800]
drnnGRUreluMakespan31 = [794, 802, 797, 799, 798, 800, 799, 799, 796, 796, 798, 798]
drnnGRUreluMakespan32 = [799, 798, 794, 795, 798, 805, 804, 797, 795, 800, 796, 798]
drnnGRUreluMakespan33 = [803, 799, 805, 796, 794, 798, 797, 798, 798, 794, 794, 798]
drnnGRUreluMakespan34 = [810, 796, 795, 798, 799, 798, 796, 795, 795, 797, 798, 798]
drnnGRUreluMakespan35 = [799, 799, 799, 799, 795, 798, 795, 800, 796, 795, 795, 796]
drnnGRUreluMakespan36 = [795, 797, 798, 799, 799, 799, 800, 794, 796, 795, 798, 800]
drnnGRUreluMakespan37 = [800, 798, 799, 794, 800, 796, 798, 798, 797, 800, 794, 798]
drnnGRUreluMakespan38 = [800, 799, 794, 796, 795, 800, 796, 804, 800, 795, 800, 798]
drnnGRUreluMakespan39 = [794, 798, 795, 804, 805, 799, 798, 800, 796, 798, 795, 794]
drnnGRUreluMakespan40 = [799, 798, 796, 798, 798, 799, 800, 796, 798, 798, 799, 798]
drnnGRUreluMakespan41 = [796, 798, 800, 797, 799, 796, 797, 796, 799, 804, 805, 798]
drnnGRUreluMakespan42 = [798, 794, 795, 799, 799, 798, 797, 798, 798, 798, 798, 795]
drnnGRUreluMakespan43 = [799, 798, 794, 794, 795, 794, 795, 799, 799, 800, 799, 794]
drnnGRUreluMakespan44 = [795, 796, 795, 799, 794, 795, 794, 796, 795, 794, 795, 796]
drnnGRUreluMakespan45 = [794, 797, 794, 795, 796, 795, 794, 799, 795, 794, 798, 798]
drnnGRUreluMakespan46 = [795, 795, 794, 795, 794, 794, 792, 794, 795, 797, 794, 794]
drnnGRUreluMakespan47 = [798, 796, 797, 798, 794, 798, 794, 797, 794, 803, 798, 798]
drnnGRUreluMakespan48 = [795, 794, 796, 798, 795, 794, 796, 795, 796, 794, 796, 796]
drnnGRUreluMakespan49 = [798, 798, 796, 798, 798, 796, 796, 798, 798, 798, 796, 798]
drnnGRUreluRewards0 = [-0.17617264919621228, -0.1759911894273128, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17544633017412387, -0.17617264919621228, -0.17508269018743108, -0.17617264919621228, -0.17617264919621228]
drnnGRUreluRewards1 = [-0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.17526455026455026, -0.1759911894273128, -0.1776214552648934, -0.17526455026455026, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765]
drnnGRUreluRewards2 = [-0.1759911894273128, -0.17617264919621228, -0.17725973169122497, -0.17617264919621228, -0.17617264919621228, -0.177078750549934, -0.177078750549934, -0.17580964970257765, -0.1759911894273128, -0.1774406332453826, -0.17617264919621228, -0.17617264919621228]
drnnGRUreluRewards3 = [-0.17580964970257765, -0.17526455026455026, -0.1759911894273128, -0.17617264919621228, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.1768976897689769, -0.177078750549934, -0.17617264919621228]
drnnGRUreluRewards4 = [-0.1781634446397188, -0.17617264919621228, -0.1759911894273128, -0.17617264919621228, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17544633017412387, -0.1768976897689769, -0.177078750549934, -0.1768976897689769]
drnnGRUreluRewards5 = [-0.1759911894273128, -0.17526455026455026, -0.17562802996914942, -0.17617264919621228, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.17617264919621228, -0.17562802996914942, -0.17617264919621228, -0.17580964970257765]
drnnGRUreluRewards6 = [-0.17580964970257765, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.17562802996914942, -0.17580964970257765, -0.17544633017412387, -0.17617264919621228, -0.1763540290620872, -0.1759911894273128, -0.17526455026455026, -0.17580964970257765]
drnnGRUreluRewards7 = [-0.17617264919621228, -0.1768976897689769, -0.17526455026455026, -0.1763540290620872, -0.17544633017412387, -0.17725973169122497, -0.177078750549934, -0.17580964970257765, -0.17617264919621228, -0.1759911894273128, -0.1759911894273128, -0.1768976897689769]
drnnGRUreluRewards8 = [-0.17617264919621228, -0.1759911894273128, -0.1759911894273128, -0.17617264919621228, -0.177078750549934, -0.17544633017412387, -0.17617264919621228, -0.17617264919621228, -0.17798286090969018, -0.17544633017412387, -0.17617264919621228, -0.17580964970257765]
drnnGRUreluRewards9 = [-0.17508269018743108, -0.17617264919621228, -0.1759911894273128, -0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17526455026455026, -0.17580964970257765, -0.17544633017412387]
drnnGRUreluRewards10 = [-0.17580964970257765, -0.17617264919621228, -0.17580964970257765, -0.1763540290620872, -0.17526455026455026, -0.17653532907770195, -0.17544633017412387, -0.1778021978021978, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.17526455026455026]
drnnGRUreluRewards11 = [-0.1768976897689769, -0.17617264919621228, -0.1759911894273128, -0.1759911894273128, -0.17580964970257765, -0.17671654929577466, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.17671654929577466, -0.17617264919621228, -0.17544633017412387]
drnnGRUreluRewards12 = [-0.17617264919621228, -0.1759911894273128, -0.177078750549934, -0.17562802996914942, -0.17580964970257765, -0.17544633017412387, -0.1759911894273128, -0.17508269018743108, -0.1759911894273128, -0.177078750549934, -0.1759911894273128, -0.17617264919621228]
drnnGRUreluRewards13 = [-0.17544633017412387, -0.17617264919621228, -0.17580964970257765, -0.17617264919621228, -0.17526455026455026, -0.1759911894273128, -0.17617264919621228, -0.1768976897689769, -0.17617264919621228, -0.17508269018743108, -0.177078750549934, -0.177078750549934]
drnnGRUreluRewards14 = [-0.17617264919621228, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.1763540290620872, -0.177078750549934, -0.17508269018743108, -0.17617264919621228, -0.1763540290620872, -0.1763540290620872, -0.17544633017412387]
drnnGRUreluRewards15 = [-0.17580964970257765, -0.17617264919621228, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17508269018743108, -0.17562802996914942, -0.17617264919621228, -0.17544633017412387, -0.1763540290620872, -0.17526455026455026, -0.1759911894273128]
drnnGRUreluRewards16 = [-0.17617264919621228, -0.177078750549934, -0.17508269018743108, -0.17617264919621228, -0.1759911894273128, -0.17617264919621228, -0.177078750549934, -0.1763540290620872, -0.17580964970257765, -0.17617264919621228, -0.1763540290620872, -0.1759911894273128]
drnnGRUreluRewards17 = [-0.17562802996914942, -0.17671654929577466, -0.1763540290620872, -0.1776214552648934, -0.17508269018743108, -0.1759911894273128, -0.17617264919621228, -0.1759911894273128, -0.177078750549934, -0.17544633017412387, -0.1763540290620872, -0.17544633017412387]
drnnGRUreluRewards18 = [-0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.1768976897689769, -0.1759911894273128, -0.17580964970257765, -0.17617264919621228, -0.1759911894273128, -0.1768976897689769, -0.17544633017412387, -0.17617264919621228, -0.1768976897689769]
drnnGRUreluRewards19 = [-0.1768976897689769, -0.17580964970257765, -0.17617264919621228, -0.1759911894273128, -0.1759911894273128, -0.1759911894273128, -0.177078750549934, -0.17526455026455026, -0.1763540290620872, -0.1759911894273128, -0.1759911894273128, -0.177078750549934]
drnnGRUreluRewards20 = [-0.1759911894273128, -0.1768976897689769, -0.17544633017412387, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.1759911894273128, -0.17798286090969018, -0.17617264919621228, -0.17617264919621228]
drnnGRUreluRewards21 = [-0.17580964970257765, -0.1759911894273128, -0.1759911894273128, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.17580964970257765, -0.17508269018743108, -0.1759911894273128, -0.17580964970257765, -0.17580964970257765]
drnnGRUreluRewards22 = [-0.1759911894273128, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.177078750549934, -0.1759911894273128, -0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17544633017412387, -0.17580964970257765]
drnnGRUreluRewards23 = [-0.17580964970257765, -0.177078750549934, -0.1776214552648934, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.17798286090969018, -0.17544633017412387, -0.1768976897689769, -0.1759911894273128, -0.17617264919621228, -0.1759911894273128]
drnnGRUreluRewards24 = [-0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17562802996914942, -0.177078750549934, -0.17580964970257765, -0.17617264919621228]
drnnGRUreluRewards25 = [-0.1759911894273128, -0.17544633017412387, -0.1759911894273128, -0.17580964970257765, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.17617264919621228, -0.17544633017412387, -0.17508269018743108, -0.17798286090969018, -0.17580964970257765]
drnnGRUreluRewards26 = [-0.1759911894273128, -0.17580964970257765, -0.177078750549934, -0.17617264919621228, -0.17653532907770195, -0.17580964970257765, -0.1759911894273128, -0.1759911894273128, -0.1759911894273128, -0.17508269018743108, -0.17653532907770195, -0.17562802996914942]
drnnGRUreluRewards27 = [-0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17653532907770195, -0.17544633017412387, -0.17580964970257765, -0.17617264919621228, -0.17580964970257765, -0.17508269018743108]
drnnGRUreluRewards28 = [-0.17544633017412387, -0.177078750549934, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.17798286090969018, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17544633017412387]
drnnGRUreluRewards29 = [-0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17653532907770195, -0.17508269018743108, -0.17580964970257765, -0.17544633017412387, -0.1776214552648934, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128]
drnnGRUreluRewards30 = [-0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17617264919621228, -0.17544633017412387, -0.17508269018743108, -0.17617264919621228, -0.17617264919621228]
drnnGRUreluRewards31 = [-0.17508269018743108, -0.17653532907770195, -0.17562802996914942, -0.1759911894273128, -0.17580964970257765, -0.17617264919621228, -0.1759911894273128, -0.1759911894273128, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765]
drnnGRUreluRewards32 = [-0.1759911894273128, -0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.1768976897689769, -0.177078750549934, -0.17562802996914942, -0.17526455026455026, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765]
drnnGRUreluRewards33 = [-0.17671654929577466, -0.1759911894273128, -0.177078750549934, -0.17544633017412387, -0.17508269018743108, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765]
drnnGRUreluRewards34 = [-0.17798286090969018, -0.17544633017412387, -0.17526455026455026, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17580964970257765, -0.17580964970257765]
drnnGRUreluRewards35 = [-0.1759911894273128, -0.1759911894273128, -0.1759911894273128, -0.1759911894273128, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17617264919621228, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387]
drnnGRUreluRewards36 = [-0.17526455026455026, -0.17562802996914942, -0.17580964970257765, -0.1759911894273128, -0.1759911894273128, -0.1759911894273128, -0.17617264919621228, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17580964970257765, -0.17617264919621228]
drnnGRUreluRewards37 = [-0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.17508269018743108, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.17562802996914942, -0.17617264919621228, -0.17508269018743108, -0.17580964970257765]
drnnGRUreluRewards38 = [-0.17617264919621228, -0.1759911894273128, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17617264919621228, -0.17544633017412387, -0.1768976897689769, -0.17617264919621228, -0.17526455026455026, -0.17617264919621228, -0.17580964970257765]
drnnGRUreluRewards39 = [-0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.1768976897689769, -0.177078750549934, -0.1759911894273128, -0.17580964970257765, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108]
drnnGRUreluRewards40 = [-0.1759911894273128, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765]
drnnGRUreluRewards41 = [-0.17544633017412387, -0.17580964970257765, -0.17617264919621228, -0.17562802996914942, -0.1759911894273128, -0.17544633017412387, -0.17562802996914942, -0.17544633017412387, -0.1759911894273128, -0.1768976897689769, -0.177078750549934, -0.17580964970257765]
drnnGRUreluRewards42 = [-0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.1759911894273128, -0.1759911894273128, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026]
drnnGRUreluRewards43 = [-0.1759911894273128, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.1759911894273128, -0.1759911894273128, -0.17617264919621228, -0.1759911894273128, -0.17508269018743108]
drnnGRUreluRewards44 = [-0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.1759911894273128, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387]
drnnGRUreluRewards45 = [-0.17508269018743108, -0.17562802996914942, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.1759911894273128, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17580964970257765]
drnnGRUreluRewards46 = [-0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17562802996914942, -0.17508269018743108, -0.17508269018743108]
drnnGRUreluRewards47 = [-0.17580964970257765, -0.17544633017412387, -0.17562802996914942, -0.17580964970257765, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17562802996914942, -0.17508269018743108, -0.17671654929577466, -0.17580964970257765, -0.17580964970257765]
drnnGRUreluRewards48 = [-0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108, -0.17544633017412387, -0.17544633017412387]
drnnGRUreluRewards49 = [-0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765]
# Deep Reinforcement Learning: 5 Dense layers, tanh activation function, 12 episodes, 50 iterations
drlTanhMakespan0 = [794, 794, 805, 799, 810, 800, 794, 810, 804, 806, 812, 808]
drlTanhMakespan1 = [796, 795, 795, 798, 799, 800, 800, 795, 797, 796, 797, 799]
drlTanhMakespan2 = [800, 797, 798, 801, 799, 800, 796, 795, 797, 796, 794, 798]
drlTanhMakespan3 = [800, 795, 799, 796, 799, 798, 795, 799, 795, 799, 798, 796]
drlTanhMakespan4 = [809, 795, 795, 800, 797, 795, 798, 798, 799, 799, 798, 798]
drlTanhMakespan5 = [795, 795, 795, 799, 795, 798, 795, 800, 795, 796, 795, 805]
drlTanhMakespan6 = [794, 800, 795, 793, 798, 795, 794, 798, 795, 799, 795, 796]
drlTanhMakespan7 = [795, 795, 795, 795, 798, 795, 797, 797, 795, 795, 798, 797]
drlTanhMakespan8 = [795, 795, 795, 794, 800, 800, 794, 795, 794, 794, 797, 795]
drlTanhMakespan9 = [793, 794, 796, 795, 796, 800, 794, 797, 793, 795, 798, 795]
drlTanhMakespan10 = [795, 795, 797, 794, 795, 798, 797, 795, 798, 794, 794, 794]
drlTanhMakespan11 = [795, 795, 795, 795, 797, 795, 795, 794, 795, 795, 795, 794]
drlTanhMakespan12 = [794, 798, 795, 794, 795, 795, 795, 797, 799, 795, 795, 795]
drlTanhMakespan13 = [795, 797, 795, 800, 796, 795, 796, 795, 795, 795, 798, 794]
drlTanhMakespan14 = [795, 795, 796, 794, 794, 794, 797, 795, 798, 795, 795, 793]
drlTanhMakespan15 = [799, 794, 795, 795, 795, 796, 801, 797, 795, 794, 795, 799]
drlTanhMakespan16 = [795, 795, 796, 798, 795, 795, 795, 795, 795, 798, 798, 796]
drlTanhMakespan17 = [800, 798, 795, 795, 798, 794, 795, 795, 797, 795, 796, 794]
drlTanhMakespan18 = [797, 800, 798, 797, 796, 794, 799, 797, 795, 796, 799, 798]
drlTanhMakespan19 = [797, 800, 795, 794, 794, 796, 795, 798, 796, 798, 797, 795]
drlTanhMakespan20 = [794, 795, 795, 799, 798, 797, 795, 795, 798, 795, 798, 795]
drlTanhMakespan21 = [796, 795, 795, 795, 795, 797, 798, 794, 797, 795, 796, 794]
drlTanhMakespan22 = [799, 796, 795, 795, 795, 795, 796, 795, 796, 798, 796, 795]
drlTanhMakespan23 = [799, 799, 795, 796, 796, 799, 796, 797, 794, 794, 798, 796]
drlTanhMakespan24 = [795, 795, 797, 800, 797, 795, 795, 796, 795, 795, 798, 799]
drlTanhMakespan25 = [795, 797, 795, 795, 795, 795, 800, 796, 795, 797, 795, 795]
drlTanhMakespan26 = [795, 795, 799, 794, 797, 794, 794, 798, 794, 796, 795, 798]
drlTanhMakespan27 = [796, 796, 795, 796, 798, 797, 794, 795, 794, 794, 794, 798]
drlTanhMakespan28 = [795, 795, 794, 798, 796, 796, 800, 797, 797, 796, 795, 794]
drlTanhMakespan29 = [795, 795, 798, 800, 797, 794, 796, 794, 792, 794, 794, 795]
drlTanhMakespan30 = [798, 797, 795, 799, 797, 800, 798, 799, 797, 800, 794, 796]
drlTanhMakespan31 = [794, 795, 800, 798, 800, 794, 800, 798, 799, 798, 798, 798]
drlTanhMakespan32 = [795, 795, 795, 794, 794, 794, 793, 795, 794, 793, 794, 795]
drlTanhMakespan33 = [794, 797, 792, 794, 795, 795, 797, 795, 795, 794, 792, 795]
drlTanhMakespan34 = [795, 794, 795, 798, 795, 796, 794, 795, 794, 794, 795, 794]
drlTanhMakespan35 = [796, 794, 797, 793, 794, 798, 795, 794, 793, 793, 795, 794]
drlTanhMakespan36 = [795, 795, 794, 795, 795, 795, 794, 795, 795, 793, 795, 794]
drlTanhMakespan37 = [794, 794, 798, 794, 794, 796, 795, 794, 793, 795, 795, 792]
drlTanhMakespan38 = [794, 796, 795, 794, 798, 798, 795, 795, 794, 794, 795, 794]
drlTanhMakespan39 = [794, 795, 795, 796, 792, 794, 795, 794, 795, 794, 794, 795]
drlTanhMakespan40 = [798, 795, 794, 795, 794, 794, 793, 795, 794, 794, 797, 794]
drlTanhMakespan41 = [795, 792, 795, 794, 794, 795, 794, 795, 792, 797, 795, 795]
drlTanhMakespan42 = [792, 794, 794, 795, 794, 794, 795, 794, 792, 794, 794, 794]
drlTanhMakespan43 = [794, 796, 794, 793, 795, 795, 793, 798, 794, 794, 798, 794]
drlTanhMakespan44 = [794, 794, 794, 794, 795, 794, 793, 794, 794, 795, 795, 794]
drlTanhMakespan45 = [790, 794, 793, 794, 793, 794, 795, 794, 791, 795, 795, 794]
drlTanhMakespan46 = [792, 794, 794, 794, 794, 794, 794, 793, 794, 794, 794, 794]
drlTanhMakespan47 = [794, 794, 794, 794, 794, 794, 794, 794, 792, 795, 793, 795]
drlTanhMakespan48 = [794, 794, 792, 792, 797, 794, 792, 794, 794, 795, 794, 795]
drlTanhMakespan49 = [795, 794, 794, 796, 794, 797, 794, 794, 794, 794, 794, 794]
drlTanhMakespan50 = [794, 792, 795, 794, 794, 794, 794, 794, 795, 794, 795, 794]
drlTanhMakespan51 = [794, 792, 796, 795, 794, 794, 795, 794, 795, 795, 795, 794]
drlTanhMakespan52 = [794, 794, 795, 792, 795, 795, 795, 792, 794, 793, 795, 794]
drlTanhMakespan53 = [794, 792, 794, 792, 794, 794, 794, 795, 795, 794, 794, 792]
drlTanhMakespan54 = [795, 793, 794, 794, 794, 792, 795, 794, 794, 792, 794, 796]
drlTanhMakespan55 = [795, 794, 794, 795, 795, 793, 794, 795, 794, 797, 795, 792]
drlTanhMakespan56 = [795, 795, 792, 795, 794, 795, 794, 794, 794, 795, 795, 795]
drlTanhMakespan57 = [795, 792, 795, 794, 795, 795, 792, 795, 794, 797, 792, 792]
drlTanhMakespan58 = [795, 795, 794, 795, 792, 794, 794, 794, 792, 792, 792, 793]
drlTanhMakespan59 = [795, 794, 792, 794, 794, 794, 792, 794, 794, 794, 793, 795]
drlTanhMakespan60 = [794, 795, 795, 795, 798, 794, 794, 794, 794, 794, 794, 792]
drlTanhMakespan61 = [792, 795, 794, 794, 795, 794, 792, 795, 795, 794, 794, 795]
drlTanhMakespan62 = [795, 794, 794, 794, 799, 794, 792, 794, 795, 795, 794, 793]
drlTanhMakespan63 = [791, 795, 792, 796, 794, 794, 792, 795, 793, 794, 792, 794]
drlTanhRewards0 = [-0.17508269018743108, -0.17508269018743108, -0.177078750549934, -0.1759911894273128, -0.17798286090969018, -0.17617264919621228, -0.17508269018743108, -0.17798286090969018, -0.1768976897689769, -0.17725973169122497, -0.17834394904458598, -0.1776214552648934]
drlTanhRewards1 = [-0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17617264919621228, -0.1759911894273128, -0.17617264919621228, -0.17526455026455026, -0.17562802996914942, -0.17544633017412387, -0.17562802996914942, -0.1759911894273128]
drlTanhRewards2 = [-0.17617264919621228, -0.17562802996914942, -0.17580964970257765, -0.1763540290620872, -0.1759911894273128, -0.17617264919621228, -0.17544633017412387, -0.17526455026455026, -0.17562802996914942, -0.17508269018743108, -0.17544633017412387, -0.17580964970257765]
drlTanhRewards3 = [-0.17617264919621228, -0.1759911894273128, -0.17526455026455026, -0.17544633017412387, -0.1759911894273128, -0.17580964970257765, -0.17526455026455026, -0.1759911894273128, -0.17526455026455026, -0.1759911894273128, -0.17580964970257765, -0.17544633017412387]
drlTanhRewards4 = [-0.1778021978021978, -0.17526455026455026, -0.17526455026455026, -0.17617264919621228, -0.17562802996914942, -0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765]
drlTanhRewards5 = [-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.1759911894273128, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17617264919621228, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.177078750549934]
drlTanhRewards6 = [-0.17508269018743108, -0.17617264919621228, -0.17526455026455026, -0.1749007498897221, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.1759911894273128, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17544633017412387]
drlTanhRewards7 = [-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17562802996914942, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17562802996914942]
drlTanhRewards8 = [-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17617264919621228, -0.17508269018743108, -0.17617264919621228, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026]
drlTanhRewards9 = [-0.1749007498897221, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17544633017412387, -0.17617264919621228, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.1749007498897221, -0.17580964970257765, -0.17526455026455026]
drlTanhRewards10 = [-0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drlTanhRewards11 = [-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026]
drlTanhRewards12 = [-0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.1759911894273128, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026]
drlTanhRewards13 = [-0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17617264919621228, -0.17544633017412387, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17508269018743108]
drlTanhRewards14 = [-0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.1749007498897221]
drlTanhRewards15 = [-0.1759911894273128, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17562802996914942, -0.1763540290620872, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.1759911894273128]
drlTanhRewards16 = [-0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.17544633017412387]
drlTanhRewards17 = [-0.17617264919621228, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108]
drlTanhRewards18 = [-0.17562802996914942, -0.17617264919621228, -0.17580964970257765, -0.17562802996914942, -0.17544633017412387, -0.1759911894273128, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17544633017412387, -0.1759911894273128, -0.17580964970257765]
drlTanhRewards19 = [-0.17562802996914942, -0.17617264919621228, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.17562802996914942, -0.17526455026455026]
drlTanhRewards20 = [-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.1759911894273128, -0.17580964970257765, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026]
drlTanhRewards21 = [-0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17562802996914942, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108]
drlTanhRewards22 = [-0.1759911894273128, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17544633017412387, -0.17526455026455026]
drlTanhRewards23 = [-0.1759911894273128, -0.1759911894273128, -0.17544633017412387, -0.17544633017412387, -0.17526455026455026, -0.1759911894273128, -0.17544633017412387, -0.17562802996914942, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17544633017412387]
drlTanhRewards24 = [-0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17617264919621228, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17580964970257765, -0.1759911894273128]
drlTanhRewards25 = [-0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17617264919621228, -0.17544633017412387, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026]
drlTanhRewards26 = [-0.17526455026455026, -0.17526455026455026, -0.1759911894273128, -0.17508269018743108, -0.17562802996914942, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17580964970257765]
drlTanhRewards27 = [-0.17544633017412387, -0.17544633017412387, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17562802996914942, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108]
drlTanhRewards28 = [-0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17544633017412387, -0.17562802996914942, -0.17544633017412387, -0.17617264919621228, -0.17562802996914942, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards29 = [-0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17617264919621228, -0.17562802996914942, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026]
drlTanhRewards30 = [-0.17580964970257765, -0.17562802996914942, -0.17526455026455026, -0.1759911894273128, -0.17562802996914942, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.17562802996914942, -0.17617264919621228, -0.17508269018743108, -0.17544633017412387]
drlTanhRewards31 = [-0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17508269018743108, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17580964970257765]
drlTanhRewards32 = [-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026]
drlTanhRewards33 = [-0.17508269018743108, -0.17562802996914942, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17471872931833224, -0.17526455026455026]
drlTanhRewards34 = [-0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards35 = [-0.17544633017412387, -0.17508269018743108, -0.17562802996914942, -0.1749007498897221, -0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.1749007498897221, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards36 = [-0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards37 = [-0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224]
drlTanhRewards38 = [-0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards39 = [-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026]
drlTanhRewards40 = [-0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17562802996914942, -0.17508269018743108]
drlTanhRewards41 = [-0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026]
drlTanhRewards42 = [-0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drlTanhRewards43 = [-0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.1749007498897221, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108]
drlTanhRewards44 = [-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards45 = [-0.1749007498897221, -0.17435444714191128, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17453662842012357, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards46 = [-0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drlTanhRewards47 = [-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026]
drlTanhRewards48 = [-0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17471872931833224, -0.17508269018743108, -0.17562802996914942, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026]
drlTanhRewards49 = [-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17562802996914942, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drlTanhRewards50 = [-0.17508269018743108, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108]
drlTanhRewards51 = [-0.17508269018743108, -0.17471872931833224, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards52 = [-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards53 = [-0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224]
drlTanhRewards54 = [-0.17526455026455026, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17544633017412387]
drlTanhRewards55 = [-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17471872931833224]
drlTanhRewards56 = [-0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026]
drlTanhRewards57 = [-0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17562802996914942, -0.17471872931833224, -0.17471872931833224]
drlTanhRewards58 = [-0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17471872931833224, -0.17471872931833224, -0.1749007498897221]
drlTanhRewards59 = [-0.17526455026455026, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17526455026455026]
drlTanhRewards60 = [-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224]
drlTanhRewards61 = [-0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026]
drlTanhRewards62 = [-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1759911894273128, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.1749007498897221]
drlTanhRewards63 = [-0.17453662842012357, -0.17471872931833224, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17526455026455026, -0.1749007498897221, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108]
# Deep Reinforcement Learning: 5 Dense layers, relu activation function, 12 episodes, 50 iterations
drlReluMakespan0 = [796, 798, 809, 798, 796, 800, 798, 799, 800, 794, 800, 798]
drlReluMakespan1 = [800, 800, 801, 806, 804, 806, 808, 798, 796, 796, 798, 800]
drlReluMakespan2 = [805, 805, 798, 800, 800, 798, 801, 799, 800, 806, 800, 800]
drlReluMakespan3 = [798, 799, 798, 795, 798, 808, 803, 800, 798, 795, 799, 800]
drlReluMakespan4 = [805, 805, 799, 796, 798, 803, 799, 800, 800, 800, 795, 794]
drlReluMakespan5 = [799, 796, 795, 800, 801, 796, 800, 795, 803, 800, 800, 805]
drlReluMakespan6 = [799, 795, 798, 794, 805, 796, 795, 799, 798, 795, 804, 796]
drlReluMakespan7 = [795, 798, 799, 798, 798, 799, 795, 794, 796, 794, 795, 805]
drlReluMakespan8 = [805, 794, 794, 795, 798, 795, 798, 795, 799, 800, 796, 798]
drlReluMakespan9 = [797, 797, 797, 794, 795, 794, 794, 797, 796, 795, 801, 799]
drlReluMakespan10 = [799, 794, 797, 795, 794, 794, 795, 795, 795, 796, 797, 799]
drlReluMakespan11 = [796, 798, 800, 795, 805, 794, 798, 796, 795, 794, 798, 795]
drlReluMakespan12 = [800, 795, 794, 798, 800, 805, 800, 798, 804, 799, 794, 803]
drlReluMakespan13 = [796, 799, 798, 794, 800, 794, 795, 796, 798, 795, 794, 799]
drlReluMakespan14 = [795, 798, 798, 798, 805, 798, 798, 798, 795, 794, 800, 796]
drlReluMakespan15 = [795, 798, 795, 805, 798, 794, 795, 798, 796, 794, 795, 796]
drlReluMakespan16 = [798, 795, 796, 799, 796, 798, 798, 795, 795, 795, 795, 799]
drlReluMakespan17 = [794, 798, 796, 798, 795, 801, 794, 798, 797, 795, 796, 801]
drlReluMakespan18 = [798, 795, 798, 798, 801, 798, 795, 795, 797, 800, 794, 800]
drlReluMakespan19 = [795, 798, 794, 800, 796, 795, 798, 797, 795, 794, 796, 796]
drlReluMakespan20 = [794, 794, 795, 795, 795, 795, 796, 798, 799, 799, 799, 795]
drlReluMakespan21 = [802, 796, 794, 797, 797, 800, 794, 794, 804, 803, 798, 797]
drlReluMakespan22 = [794, 795, 795, 795, 798, 795, 794, 799, 794, 803, 795, 794]
drlReluMakespan23 = [794, 798, 799, 794, 795, 795, 799, 795, 796, 795, 797, 799]
drlReluMakespan24 = [795, 794, 797, 800, 794, 795, 795, 795, 795, 800, 800, 798]
drlReluMakespan25 = [795, 794, 797, 796, 798, 795, 795, 794, 799, 795, 794, 798]
drlReluMakespan26 = [801, 795, 800, 794, 794, 796, 800, 798, 798, 799, 794, 796]
drlReluMakespan27 = [796, 795, 796, 795, 796, 795, 795, 800, 794, 794, 794, 796]
drlReluMakespan28 = [794, 794, 795, 796, 794, 795, 795, 797, 794, 794, 796, 795]
drlReluMakespan29 = [793, 794, 795, 800, 795, 795, 794, 798, 798, 796, 795, 794]
drlReluMakespan30 = [802, 794, 794, 798, 794, 796, 805, 794, 800, 794, 796, 794]
drlReluMakespan31 = [797, 794, 794, 794, 800, 800, 794, 794, 798, 795, 794, 798]
drlReluMakespan32 = [794, 798, 794, 795, 794, 795, 798, 794, 794, 795, 794, 798]
drlReluMakespan33 = [798, 794, 798, 795, 794, 793, 797, 798, 794, 794, 801, 793]
drlReluMakespan34 = [794, 798, 794, 795, 794, 793, 798, 795, 794, 800, 794, 795]
drlReluMakespan35 = [794, 796, 794, 796, 806, 795, 795, 795, 796, 795, 795, 799]
drlReluMakespan36 = [795, 794, 794, 796, 796, 798, 794, 796, 794, 795, 794, 795]
drlReluMakespan37 = [795, 794, 795, 798, 794, 794, 794, 794, 794, 794, 795, 797]
drlReluMakespan38 = [794, 798, 794, 798, 797, 794, 794, 795, 795, 794, 795, 795]
drlReluMakespan39 = [797, 794, 795, 796, 796, 796, 798, 794, 794, 795, 794, 798]
drlReluMakespan40 = [798, 795, 795, 798, 792, 795, 795, 794, 795, 794, 798, 794]
drlReluMakespan41 = [795, 794, 794, 794, 794, 794, 798, 793, 794, 794, 794, 793]
drlReluMakespan42 = [794, 794, 794, 794, 799, 794, 795, 794, 796, 794, 794, 794]
drlReluMakespan43 = [794, 797, 795, 794, 795, 794, 794, 795, 794, 794, 793, 794]
drlReluMakespan44 = [794, 792, 793, 794, 794, 796, 794, 798, 795, 794, 794, 796]
drlReluMakespan45 = [795, 794, 799, 794, 794, 793, 794, 795, 795, 793, 796, 794]
drlReluMakespan46 = [794, 796, 794, 794, 794, 794, 794, 793, 799, 792, 794, 794]
drlReluMakespan47 = [795, 794, 793, 794, 796, 797, 794, 794, 795, 794, 794, 794]
drlReluMakespan48 = [794, 794, 794, 792, 794, 794, 795, 794, 794, 794, 794, 794]
drlReluMakespan49 = [794, 794, 795, 792, 797, 797, 794, 794, 792, 800, 795, 795]
drlReluRewards0 = [-0.17544633017412387, -0.17580964970257765, -0.1778021978021978, -0.17544633017412387, -0.17580964970257765, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.17508269018743108, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765]
drlReluRewards1 = [-0.17617264919621228, -0.17617264919621228, -0.1763540290620872, -0.17725973169122497, -0.1768976897689769, -0.17725973169122497, -0.1776214552648934, -0.17580964970257765, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17617264919621228]
drlReluRewards2 = [-0.177078750549934, -0.177078750549934, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.1763540290620872, -0.1759911894273128, -0.17617264919621228, -0.17725973169122497, -0.17617264919621228, -0.17617264919621228]
drlReluRewards3 = [-0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.1776214552648934, -0.17671654929577466, -0.17617264919621228, -0.17580964970257765, -0.17526455026455026, -0.1759911894273128, -0.17617264919621228]
drlReluRewards4 = [-0.177078750549934, -0.177078750549934, -0.1759911894273128, -0.17544633017412387, -0.17580964970257765, -0.17671654929577466, -0.1759911894273128, -0.17617264919621228, -0.17617264919621228, -0.17617264919621228, -0.17526455026455026, -0.17508269018743108]
drlReluRewards5 = [-0.1759911894273128, -0.17544633017412387, -0.17526455026455026, -0.17617264919621228, -0.1763540290620872, -0.17544633017412387, -0.17526455026455026, -0.17617264919621228, -0.17671654929577466, -0.17617264919621228, -0.17617264919621228, -0.177078750549934]
drlReluRewards6 = [-0.1759911894273128, -0.17526455026455026, -0.17580964970257765, -0.17508269018743108, -0.177078750549934, -0.17544633017412387, -0.17526455026455026, -0.1759911894273128, -0.17580964970257765, -0.17526455026455026, -0.1768976897689769, -0.17544633017412387]
drlReluRewards7 = [-0.17526455026455026, -0.1759911894273128, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.177078750549934]
drlReluRewards8 = [-0.177078750549934, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.1759911894273128, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765]
drlReluRewards9 = [-0.17562802996914942, -0.17562802996914942, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17562802996914942, -0.17544633017412387, -0.17526455026455026, -0.1763540290620872, -0.1759911894273128]
drlReluRewards10 = [-0.1759911894273128, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17562802996914942, -0.1759911894273128]
drlReluRewards11 = [-0.17544633017412387, -0.17580964970257765, -0.17617264919621228, -0.17526455026455026, -0.177078750549934, -0.17508269018743108, -0.17580964970257765, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17526455026455026]
drlReluRewards12 = [-0.17617264919621228, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.17617264919621228, -0.17580964970257765, -0.1768976897689769, -0.1759911894273128, -0.17508269018743108, -0.17671654929577466]
drlReluRewards13 = [-0.17544633017412387, -0.1759911894273128, -0.17580964970257765, -0.17508269018743108, -0.17617264919621228, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.1759911894273128]
drlReluRewards14 = [-0.17526455026455026, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17617264919621228, -0.17544633017412387]
drlReluRewards15 = [-0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.177078750549934, -0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17544633017412387, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387]
drlReluRewards16 = [-0.17580964970257765, -0.17526455026455026, -0.17544633017412387, -0.1759911894273128, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.1759911894273128]
drlReluRewards17 = [-0.17508269018743108, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.1763540290620872, -0.17508269018743108, -0.17580964970257765, -0.17562802996914942, -0.17526455026455026, -0.17544633017412387, -0.1763540290620872]
drlReluRewards18 = [-0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.17580964970257765, -0.1763540290620872, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17617264919621228, -0.17508269018743108, -0.17617264919621228]
drlReluRewards19 = [-0.17526455026455026, -0.17580964970257765, -0.17508269018743108, -0.17617264919621228, -0.17544633017412387, -0.17526455026455026, -0.17580964970257765, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17544633017412387]
drlReluRewards20 = [-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.1759911894273128, -0.1759911894273128, -0.1759911894273128, -0.17526455026455026]
drlReluRewards21 = [-0.17653532907770195, -0.17544633017412387, -0.17562802996914942, -0.17508269018743108, -0.17562802996914942, -0.17617264919621228, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.1768976897689769, -0.17671654929577466, -0.17562802996914942]
drlReluRewards22 = [-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.1759911894273128, -0.17508269018743108, -0.17671654929577466, -0.17526455026455026, -0.17508269018743108]
drlReluRewards23 = [-0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.1759911894273128, -0.17508269018743108, -0.17526455026455026, -0.1759911894273128, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17562802996914942, -0.1759911894273128]
drlReluRewards24 = [-0.17526455026455026, -0.17508269018743108, -0.17562802996914942, -0.17617264919621228, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765]
drlReluRewards25 = [-0.17526455026455026, -0.17508269018743108, -0.17562802996914942, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.1759911894273128, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765]
drlReluRewards26 = [-0.1763540290620872, -0.17526455026455026, -0.17617264919621228, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17508269018743108, -0.17544633017412387]
drlReluRewards27 = [-0.17544633017412387, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17617264919621228, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387]
drlReluRewards28 = [-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026]
drlReluRewards29 = [-0.1749007498897221, -0.17526455026455026, -0.17508269018743108, -0.17617264919621228, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108]
drlReluRewards30 = [-0.17653532907770195, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17544633017412387, -0.177078750549934, -0.17508269018743108, -0.17617264919621228, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108]
drlReluRewards31 = [-0.17562802996914942, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17617264919621228, -0.17617264919621228, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17508269018743108, -0.17580964970257765]
drlReluRewards32 = [-0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765]
drlReluRewards33 = [-0.17580964970257765, -0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.1749007498897221, -0.17562802996914942, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.1763540290620872, -0.1749007498897221]
drlReluRewards34 = [-0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.1749007498897221, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17617264919621228, -0.17508269018743108, -0.17526455026455026]
drlReluRewards35 = [-0.17508269018743108, -0.17544633017412387, -0.17725973169122497, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.1759911894273128]
drlReluRewards36 = [-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026]
drlReluRewards37 = [-0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17562802996914942]
drlReluRewards38 = [-0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17580964970257765, -0.17562802996914942, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026]
drlReluRewards39 = [-0.17562802996914942, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765]
drlReluRewards40 = [-0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17471872931833224, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108]
drlReluRewards41 = [-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221]
drlReluRewards42 = [-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1759911894273128, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drlReluRewards43 = [-0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108]
drlReluRewards44 = [-0.17508269018743108, -0.17471872931833224, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387]
drlReluRewards45 = [-0.17526455026455026, -0.17508269018743108, -0.1759911894273128, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.1749007498897221, -0.17544633017412387, -0.17508269018743108]
drlReluRewards46 = [-0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.1759911894273128, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108]
drlReluRewards47 = [-0.17526455026455026, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17544633017412387, -0.17562802996914942, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drlReluRewards48 = [-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drlReluRewards49 = [-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17562802996914942, -0.17562802996914942, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17617264919621228, -0.17526455026455026, -0.17526455026455026]
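# Optional helper (illustrative sketch only, not called by the code below): every per-run
# result list above follows the naming pattern <prefix><run_index>, so the per-run means
# could also be collected with a single loop over globals() rather than one append per run.
# The name collect_means and its defaults are assumptions made for this sketch; they are
# not part of the original experiment code.
def collect_means(prefix, n_runs=50, namespace=None):
    """Return [np.mean(<prefix>0), ..., np.mean(<prefix>{n_runs-1})] for lists named <prefix>0..<prefix>{n_runs-1}."""
    namespace = globals() if namespace is None else namespace
    return [np.mean(namespace[f"{prefix}{i}"]) for i in range(n_runs)]
# Example usage (would produce the same values as the explicit appends in __main__ below):
#   drnnGRUtanhMakespan = collect_means("drnnGRUtanhMakespan")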
if __name__ == "__main__":
##############################################
##############################################
##############################################
# Deep Recurrent Reinforcement Learning with 1 GRU layer and 4 Dense layers
drnnGRUtanhMakespan = []
drnnGRUtanhRewards = []
drnnGRUtanhMakespanList = []
drnnGRUtanhRewardsList = []
drnnGRUtanhMakespanValues = []
drnnGRUtanhRewardsValues = []
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan0))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan1))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan2))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan3))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan4))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan5))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan6))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan7))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan8))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan9))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan10))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan11))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan12))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan13))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan14))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan15))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan16))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan17))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan18))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan19))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan20))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan21))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan22))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan23))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan24))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan25))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan26))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan27))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan28))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan29))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan30))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan31))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan32))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan33))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan34))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan35))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan36))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan37))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan38))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan39))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan40))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan41))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan42))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan43))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan44))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan45))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan46))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan47))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan48))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan49))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards0))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards1))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards2))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards3))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards4))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards5))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards6))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards7))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards8))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards9))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards10))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards11))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards12))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards13))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards14))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards15))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards16))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards17))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards18))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards19))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards20))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards21))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards22))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards23))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards24))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards25))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards26))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards27))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards28))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards29))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards30))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards31))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards32))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards33))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards34))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards35))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards36))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards37))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards38))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards39))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards40))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards41))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards42))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards43))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards44))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards45))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards46))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards47))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards48))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards49))
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan0)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan1)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan2)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan3)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan4)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan5)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan6)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan7)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan8)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan9)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan10)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan11)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan12)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan13)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan14)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan15)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan16)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan17)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan18)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan19)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan20)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan21)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan22)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan23)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan24)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan25)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan26)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan27)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan28)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan29)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan30)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan31)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan32)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan33)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan34)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan35)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan36)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan37)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan38)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan39)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan40)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan41)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan42)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan43)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan44)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan45)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan46)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan47)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan48)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan49)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards0)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards1)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards2)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards3)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards4)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards5)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards6)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards7)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards8)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards9)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards10)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards11)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards12)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards13)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards14)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards15)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards16)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards17)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards18)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards19)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards20)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards21)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards22)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards23)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards24)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards25)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards26)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards27)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards28)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards29)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards30)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards31)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards32)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards33)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards34)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards35)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards36)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards37)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards38)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards39)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards40)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards41)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards42)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards43)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards44)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards45)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards46)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards47)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards48)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards49)
drnnGRUreluMakespan = []
drnnGRUreluRewards = []
drnnGRUreluMakespanList = []
drnnGRUreluRewardsList = []
drnnGRUreluMakespanValues = []
drnnGRUreluRewardsValues = []
for i in range(50):
    drnnGRUreluMakespan.append(np.mean(globals()["drnnGRUreluMakespan%d" % i]))
for i in range(50):
    drnnGRUreluRewards.append(np.mean(globals()["drnnGRUreluRewards%d" % i]))
for i in range(50):
    drnnGRUreluMakespanList.append(globals()["drnnGRUreluMakespan%d" % i])
for i in range(50):
    drnnGRUreluRewardsList.append(globals()["drnnGRUreluRewards%d" % i])
for vector in drnnGRUtanhMakespanList:
for element in vector:
drnnGRUtanhMakespanValues.append(element)
for vector in drnnGRUtanhRewardsList:
for element in vector:
drnnGRUtanhRewardsValues.append(element)
##################
for vector in drnnGRUreluMakespanList:
for element in vector:
drnnGRUreluMakespanValues.append(element)
for vector in drnnGRUreluRewardsList:
for element in vector:
drnnGRUreluRewardsValues.append(element)
#####################
smoothGRUtanhMakespanValues = pd.Series(drnnGRUtanhMakespanValues)
# -*- coding: utf-8 -*-
"""
Created on 2018-09-15
@author: <NAME>
"""
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import OneHotEncoder
RODADA_ATUAL = 38
# Importing the dataset
df = pd.read_csv('data/dados_2018.csv')
df = df[df['atletas.rodada_id'] >= 6]
df = df.drop(['A', 'CA', 'CV', 'DD', 'DP', 'FC', 'FD', 'FF', 'FS', 'FT', 'G', 'GC', \
'GS', 'I', 'PE', 'PP', 'RB', 'SG'], axis=1)
###
### MLR model with one-hot encoding for position and opponent team
###
array_pos = df[['atletas.posicao_id']]
array_pos = pd.get_dummies(array_pos, drop_first=True)
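# Illustrative helper (not part of the original script): one way the MLR model described
# above could be fit and scored from a one-hot encoded design matrix and a numeric target.
# The function is only defined here (never called), and the choice of feature/target
# columns is left to the caller, since those columns are not shown in this excerpt.
def fit_mlr_example(features, target, test_size=0.2, seed=0):
    # split, fit an ordinary least-squares model, and report test-set error metrics
    X_train, X_test, y_train, y_test = train_test_split(features, target, test_size=test_size, random_state=seed)
    model = LinearRegression()
    model.fit(X_train, y_train)
    preds = model.predict(X_test)
    return model, mean_squared_error(y_test, preds), r2_score(y_test, preds)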
#-*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import math
# SPC rule-based anomaly detection
# For inquiries, please contact 민경란 at <EMAIL>
###### ruleset information ######
# 0. basic
# 1. Nelson
# 2. WE
# 3. WE (supplemental)
# 4. WE (Asymmetric control limits)
# 5. Juran
# 6. Gitlow
# 7. Duncan
# 8. Westgard
# 9. AIAG (on hold)
# 10. Hughes (on hold)
################################
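# e.g. setting_ruleset(2) returns the Western Electric rule set (4 rules, window size 8).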
def setting_ruleset(num) :
ruleset = pd.DataFrame({'ruleset_id':[num]})
ruleset['name'] = "none"
ruleset['description'] = u" * 룰셋이 존재하지 않습니다"
ruleset['num_rules'] = 0
ruleset['window_size'] = 0
if num == 1 :
ruleset['name'] = u'Nelson Rule'
ruleset['description'] = u""" * Nelson 규칙세트가 정의하는 아래 규칙 중 하나라도 해당 할 경우 이상으로 판단함.
[규칙 1] 최근 샘플이 '평균 +- 3*표준편차' 범위를 벗어남.
[규칙 2] 최근 3 샘플 중 2개 이상이 같은 방향으로 '평균 +- 2*표준편차' 범위를 벗어남.
[규칙 3] 최근 5 샘플 중 4개 이상이 같은 방향으로 '평균 +- 표준편차' 범위를 벗어남.
[규칙 4] 최근 9 샘플이 연속으로 평균을 기준으로 같은 방향에 위치함.
[규칙 5] 최근 6 샘플이 연속으로 증가 (또는 감소) 함.
[규칙 6] 최근 15 샘플이 연속으로 '평균 +- 표준편차' 범위를 벗어나지 않음.
[규칙 7] 최근 14 샘플이 연속으로 증가와 감소의 전환을 반복함.
[규칙 8] 최근 8 샘플이 연속으로 '평균 +- 표준편차' 범위를 벗어남. """
ruleset['num_rules'] = 8
ruleset['window_size'] = 15
elif num ==2 :
ruleset['name'] = u'WE Rule'
ruleset['description'] = u""" * Western Electric 규칙세트가 정의하는 아래 규칙 중 하나라도 해당 할 경우 이상으로 판단함.
[규칙 1] 최근 샘플이 '평균 +- 3*표준편차' 범위를 벗어남.
[규칙 2] 최근 3 샘플 중 2개 이상이 같은 방향으로 '평균 + 2*표준편차' 범위를 벗어남.
[규칙 3] 최근 5 샘플 중 4개 이상이 같은 방향으로 '평균 + 표준편차' 범위를 벗어남.
[규칙 4] 최근 8 샘플이 연속으로 평균을 기준으로 같은 방향에 위치함. """
ruleset['num_rules'] = 4
ruleset['window_size'] = 8
elif num == 3 :
ruleset['name'] = u'WE supplemental Rule'
ruleset['description'] = u""" * Western Electric supplemental 규칙세트가 정의하는 아래 규칙 중 하나라도 위반할 경우 이상으로 판단함.
[규칙 1] 최근 샘플이 '평균 +- 3*표준편차' 범위를 벗어남.
[규칙 2] 최근 3 샘플 중 2개 이상이 같은 방향으로 '평균 + 2*표준편차' 범위를 벗어남.
[규칙 3] 최근 5 샘플 중 4개 이상이 같은 방향으로 '평균 + 표준편차' 범위를 벗어남.
[규칙 4] 최근 8 샘플이 연속으로 평균을 기준으로 같은 방향에 위치함.
[규칙 5] 최근 6 샘플이 연속으로 증가 (또는 감소) 함.
[규칙 6] 최근 15 샘플이 연속으로 '평균 +- 표준편차' 범위를 벗어나지 않음.
[규칙 7] 최근 14 샘플이 연속으로 증가와 감소의 전환을 반복함.
[규칙 8] 최근 8 샘플이 연속으로 '평균 +- 표준편차' 범위를 벗어남. """
ruleset['num_rules'] = 8
ruleset['window_size'] = 15
elif num ==4 :
ruleset['name'] = u'WE Rule - Asymmetric control limits for small sample'
ruleset['description'] = u""" * Western Electric 규칙세트가 정의하는 아래 규칙 중 하나라도 위반할 경우 이상으로 판단함
[규칙 1] 최근 샘플이 '평균 + 3*표준편차' 범위를 벗어남.
[규칙 2] 최근 2 샘플이 연속으로 '평균 + 2*표준편차' 위를 벗어남.
[규칙 3] 최근 3 샘플이 연속으로 '평균 + 표준편차' 위를 벗어남.
[규칙 4] 최근 7 샘플이 연속으로 '평균' 위에 위치함.
[규칙 5] 최근 10 샘플이 연속으로 '평균' 아래에 위치함.
[규칙 6] 최근 6 샘플이 연속으로 '평균 - 표준편차' 아래를 벗어남.
[규칙 7] 최근 4 샘플이 연속으로 '평균 - 2*표준편차' 아래를 벗어남. """
ruleset['num_rules'] = 7
ruleset['window_size'] = 10
elif num == 5 :
ruleset['name'] = u'Juran Rule'
ruleset['description'] = u""" * Juran 규칙세트가 포함하는 아래 규칙 중 하나라도 위반할 경우 이상으로 판단함.
[규칙 1] 최근 샘플이 '평균 + 3*표준편차' 범위를 벗어남.
[규칙 2] 최근 3 샘플 중 2개 이상이 같은 방향으로 '평균 +- 2*표준편차' 범위를 벗어남.
[규칙 3] 최근 5 샘플 중 4개 이상이 같은 방향으로 '평균 +- 표준편차' 범위를 벗어남.
[규칙 4] 최근 9 샘플이 연속으로 평균을 기준으로 같은 방향에 위치함.
[규칙 5] 최근 6 샘플이 연속으로 증가 (또는 감소) 함.
[규칙 6] 최근 8 샘플이 연속으로 '평균 +- 표준편차' 범위를 벗어남. """
ruleset['num_rules'] = 6
ruleset['window_size'] = 9
elif num == 6 :
ruleset['name'] = u'Gitlow Rule'
ruleset['description'] = u""" * Gitlow 규칙세트가 포함하는 아래 규칙 중 하나라도 위반할 경우 이상으로 판단함.
[규칙 1] 최근 샘플이 '평균 + 3*표준편차' 범위를 벗어남.
[규칙 2] 최근 3 샘플 중 2개 이상이 같은 방향으로 '평균 +- 2*표준편차' 범위를 벗어남.
[규칙 3] 최근 5 샘플 중 4개 이상이 같은 방향으로 '평균 +- 표준편차' 범위를 벗어남.
[규칙 4] 최근 8 샘플이 연속으로 평균을 기준으로 같은 방향에 위치함.
[규칙 5] 최근 8 샘플이 연속으로 증가 (또는 감소) 함.
"""
ruleset['num_rules'] = 5
ruleset['window_size'] = 8
elif num == 7 :
ruleset['name'] = u'Duncan Rule'
ruleset['description'] = u""" * Duncan 규칙세트가 포함하는 아래 규칙 중 하나라도 위반할 경우 이상으로 판단함.
[규칙 1] 최근 샘플이 '평균 + 3*표준편차' 범위를 벗어남.
[규칙 2] 최근 3 샘플 중 2개 이상이 같은 방향으로 '평균 +- 2*표준편차' 범위를 벗어남.
[규칙 3] 최근 5 샘플 중 4개 이상이 같은 방향으로 '평균 +- 표준편차' 범위를 벗어남.
[규칙 4] 최근 7 샘플이 연속으로 증가 (또는 감소) 함."""
ruleset['num_rules'] = 4
ruleset['window_size'] = 7
elif num == 8 :
ruleset['name'] = u'Westgard Rule'
ruleset['description'] = u""" * Westgard 규칙세트가 포함하는 아래 규칙 중 하나라도 위반할 경우 이상으로 판단함.
[규칙 1] 최근 샘플이 '평균 + 3*표준편차' 범위를 벗어남.
[규칙 2] 최근 2 샘플이 같은 방향으로 '평균 +- 2*표준편차' 범위를 벗어남.
[규칙 3] 최근 4 샘플이 같은 방향으로 '평균 +- 표준편차' 범위를 벗어남.
[규칙 4] 최근 10 샘플이 연속으로 평균을 기준으로 같은 방향에 위치함.
[규칙 5] 최근 8 샘플이 연속으로 증가 (또는 감소) 함.
[규칙 6] 최근 2 샘플이 서로 반대의 평균 +- 2*표준편차' 범위를 벗어남."""
ruleset['num_rules'] = 6
ruleset['window_size'] = 10
elif num == 9 : # 보류
ruleset['name'] = u'AIAG 규칙세트'
ruleset['description'] = u""" * Automotive Industry Action Group 규칙세트가 포함하는 아래 규칙 중 하나라도 위반할 경우 이상으로 판단함.
[규칙 1] 모든 샘플은 '평균 + 3*표준편차' 범위를 벗어나지 않음.
[규칙 2] 7 샘플이 연속으로 평균을 기준으로 같은 방향에 위치하지 않음.
[규칙 3] 7 샘플이 연속으로 증가 (또는 감소) 하지 않음. """
ruleset['num_rules'] = 3
ruleset['window_size'] = 7
elif num ==0 :
ruleset['name'] = u'basic 규칙'
ruleset['description'] = u""" * basic 규칙세트가 포함하는 아래 규칙 중 하나라도 해당 할 경우 이상으로 판단함.
[규칙 1] 모든 샘플은 '평균 +- 3*표준편차' 범위를 벗어남. """
ruleset['num_rules'] = 1
ruleset['window_size'] = 15
return ruleset
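# new_gaussmodel(): incremental (online) update of a running Gaussian model. With a
# learning rate of 1/n the recursion tracks the running sample mean and an approximate
# running variance without storing the full sample history.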
def new_gaussmodel(old_gauss,sample_value) :
gauss = pd.DataFrame({'mean':old_gauss['mean'], 'stddev':old_gauss['stddev'], 'examined_sample_counter':old_gauss['examined_sample_counter']})
if gauss.loc[0,'examined_sample_counter'] == 0 :
gauss.loc[0,'examined_sample_counter'] = 1
model_update_counter = gauss.loc[0,'examined_sample_counter']
learningrate = 1.0/model_update_counter
gauss.loc[0,'mean'] = sample_value
else :
# elif ( gauss['stddev'][0] == 0 ) | (gauss['mean'][0] - 6.0 *gauss['stddev'][0] <= sample_value[0] <= gauss['mean'][0] + 6.0 *gauss['stddev'][0] ) | (gauss['num_examined_samples']<15)):
gauss.loc[0,'examined_sample_counter'] = gauss.loc[0,'examined_sample_counter'] + 1
model_update_counter = gauss.loc[0,'examined_sample_counter']
learningrate = 1.0/model_update_counter
gauss.loc[0,'mean'] = ((1.0-learningrate) * gauss.loc[0,'mean']) + ( learningrate * sample_value )
if abs(sample_value - gauss.loc[0,'mean'])>0.000001 :
variance = gauss.loc[0,'stddev'] *gauss.loc[0,'stddev']
variance = ((1.0-learningrate) * variance) + (learningrate * math.pow(sample_value-gauss.loc[0,'mean'],2.0))
gauss.loc[0,'stddev'] = math.sqrt(variance)
return gauss
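# Hypothetical driver (not part of the original module) showing how the pieces are meant
# to fit together: keep a sliding window of raw samples, update the Gaussian model with
# each new sample, and evaluate one rule checker (Nelson, as an example) per step. The
# column name 'value' matches what the helper functions below expect.
def run_spc_example(samples, ruleset_id=1):
    ruleset = setting_ruleset(ruleset_id)
    window_size = int(ruleset.loc[0, 'window_size'])
    gauss = pd.DataFrame({'mean': [0.0], 'stddev': [0.0], 'examined_sample_counter': [0]})
    window, flags_history = [], []
    for x in samples:
        gauss = new_gaussmodel(gauss, x)   # update running mean / stddev
        window.append(x)
        window = window[-window_size:]     # keep only the most recent samples
        flags_history.append(nelson(pd.DataFrame({'value': window}), gauss))
    return flags_history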
def same_side_in_a_row(queue,num) : # checks a run of `num` consecutive samples, including the current one
    # 1 (positive side) / 0 (mean) / -1 (negative side)
if (queue[-num:] == queue.loc[queue.index.max()]).all()[0] :
return 1 # in a row : fail
else :
return 0 # not in a row : pass
def different_side_in_a_row(queue,num) : # /\/\/\/\
# 1 (positive side) / 0 (mean) / -1 (negative side)
length = len(queue)
queue = queue.value.tolist()
for i in range(0,num-1) :
if queue[length-1-i] == queue[length-2-i] : # same side
return 0
        elif queue[length-1-i] == 0 : ## middle (the run should alternate around the mean, but this point sits on it)
            return 0
        elif queue[length-2-i] == 0 : ## middle (the run should alternate around the mean, but this point sits on it)
return 0
else :
pass # different side
return 1 # success
def same_side_in_a_row_n1_of_n2(queue,limit,num) : # check whether at least `limit` of the last `num` samples qualify
# 1 (positive side) / 0 (mean) / -1 (negative side)
cnt=[0,0] # positive & negative side count
length = len(queue)
queue = queue.value.tolist()
for i in range(0,num) :
if queue[length-1-i] == 1 : # positive side count
cnt[0]+=1
elif queue[length-1-i] == -1 : # negative side count
cnt[1]+=1
if any(c >= limit for c in cnt) :
return 1 # exceed limit
return 0
def check_sign(x,y) : # determine the sign of x relative to the threshold y
if x > y :
return 1 # positive side
elif -y <= x <= y :
return 0 # equal
else :
return -1 # negtive side
def basic(queue,gauss) : # Ruleset 0
violated_rule_flag = list('0') # 1 rules
queue = pd.DataFrame(queue)
mean = float(gauss['mean'])
stddev = float(gauss['stddev'])
queue_dev = queue-mean # deviation
    # rule 1: any single data point falling outside the ±3σ limits
if (3*stddev < abs(queue_dev.iloc[len(queue)-1])).value :
violated_rule_flag[0]='1'
return ''.join(violated_rule_flag)
def nelson(queue,gauss) : # Ruleset 1
violated_rule_flag = list('00000000') # 8 rules
queue = pd.DataFrame(queue)
mean = float(gauss['mean'])
stddev = float(gauss['stddev'])
queue_dev = queue-mean # deviation
    queue_sign = np.sign(queue_dev) # sign relative to the mean
    queue_outofsig = queue_dev.applymap(lambda x : check_sign(x,stddev)) # sign relative to 1 sigma
    queue_outof2sig = queue_dev.applymap(lambda x : check_sign(x,2*stddev)) # sign relative to 2 sigma
    queue_diff = queue.diff() # difference between consecutive values
    queue_incdec = np.sign(queue_diff) # is increasing or decreasing? (sign of the difference)
# rule 1 One point is more than 3 standard deviations from the mean.
    # a measurement beyond 3 sigma leaves the allowed range
    # if not (-3*stddev < queue_dev.iloc[len(queue)-1] < 3*stddev)
    if (3*stddev < abs(queue_dev.iloc[len(queue)-1])).value : # outside 3 sigma
violated_rule_flag[0]='1'
# rule 2 Two (or three) out of three points in a row are more than 2 standard deviations from the mean in the same direction.
    # two or more of the last three samples beyond 2 sigma on the same side of the mean: moderate excursion from the allowed range
if same_side_in_a_row_n1_of_n2(queue_outof2sig,2,3) :
violated_rule_flag[1]='1'
# rule 3 Four (or five) out of five points in a row are more than 1 standard deviation from the mean in the same direction.
    # four or more of the last five samples beyond 1 sigma on the same side of the mean: slight excursion from the allowed range
if same_side_in_a_row_n1_of_n2(queue_outofsig,4,5) :
violated_rule_flag[2]='1'
# rule 4 Nine (or more) points in a row are on the same side of the mean.
    # nine or more consecutive samples on the same side of the mean: a persistent bias exists
if same_side_in_a_row(queue_sign,9) :
violated_rule_flag[3]='1'
# rule 5 Six (or more) points in a row are continually increasing (or decreasing).
    # six or more consecutive samples increasing (or decreasing): an abnormal trend exists
if same_side_in_a_row(queue_incdec,5) :
violated_rule_flag[4]='1'
# rule 6 Fifteen points in a row are all within 1 standard deviation of the mean on either side of the mean.
    # fifteen consecutive samples all inside the 1-sigma band: suspiciously little variation
if same_side_in_a_row(abs(queue_dev) < stddev,15) :
violated_rule_flag[5]='1'
# rule 7 Fourteen (or more) points in a row alternate in direction, increasing then decreasing.
    # fourteen or more consecutive points alternating up and down: abnormal oscillation within the error range
if different_side_in_a_row(queue_incdec,13) :
violated_rule_flag[6]='1'
# rule 8 Eight points in a row exist, but none within 1 standard deviation of the mean, and the points are in both directions from the mean.
    # it is very unlikely that none of eight consecutive samples falls within 1 sigma, hence abnormal
if same_side_in_a_row(abs(queue_dev) > stddev,8) :
violated_rule_flag[7]='1'
return ''.join(violated_rule_flag)
def WE(queue,gauss) : # Ruleset 2
violated_rule_flag = list('0000') # 4 rules
queue = pd.DataFrame(queue)
mean = float(gauss['mean'])
stddev = float(gauss['stddev'])
queue_dev = queue-mean # deviation
queue_sign = np.sign(queue_dev)
queue_outofsig = queue_dev.applymap(lambda x : check_sign(x,stddev))
queue_outof2sig = queue_dev.applymap(lambda x : check_sign(x,2*stddev))
# rule 1 Any single data point falls outside the 3σ-limit from the centerline (i.e., any point that falls outside Zone A, beyond either the upper or lower control limit)
#if not ((-3*stddev < queue_dev.iloc[len(queue)-1] ) & (queue.iloc[len(queue)-1]< 3*stddev)).all :
if (3*stddev < abs(queue_dev.iloc[len(queue)-1])).value :
violated_rule_flag[0]='1'
# rule 2 Two out of three consecutive points fall beyond the 2σ-limit (in zone A or beyond), on the same side of the centerline
    # two or more of the last three samples beyond 2 sigma on the same side of the mean: moderate excursion from the allowed range
if same_side_in_a_row_n1_of_n2(queue_outof2sig,2,3) :
violated_rule_flag[1]='1'
# rule 3 Four out of five consecutive points fall beyond the 1σ-limit (in zone B or beyond), on the same side of the centerline
    # four or more of the last five samples beyond 1 sigma on the same side of the mean: slight excursion from the allowed range
if same_side_in_a_row_n1_of_n2(queue_outofsig,4,5) :
violated_rule_flag[2]='1'
# rule 4 eight consecutive points fall on the same side of the centerline (in zone C or beyond)
    # eight or more consecutive samples on the same side of the mean: a persistent bias exists
if same_side_in_a_row(queue_sign,8) :
violated_rule_flag[3]='1'
return ''.join(violated_rule_flag)
def WE_supple(queue,gauss) : # Ruleset 3
violated_rule_flag = list('00000000') # 8 rules
queue = pd.DataFrame(queue)
mean = float(gauss['mean'])
stddev = float(gauss['stddev'])
queue_dev = queue-mean # deviation
queue_sign = np.sign(queue_dev)
queue_outofsig = queue_dev.applymap(lambda x : check_sign(x,stddev))
queue_outof2sig = queue_dev.applymap(lambda x : check_sign(x,2*stddev))
queue_diff = queue.diff() # difference
queue_incdec = np.sign(queue_diff) # is increasing or decreasing?
# rule 1 Any single data point falls outside the 3σ-limit from the centerline (i.e., any point that falls outside Zone A, beyond either the upper or lower control limit)
# 1. The most recent point plots outside one of the 3-sigma control limits.
# If a point lies outside either of these limits, there is only a 0.3% chance that this was caused by the normal process.
if (3*stddev < abs(queue_dev.iloc[len(queue)-1])).value :
violated_rule_flag[0]='1'
# rule 2 Two out of three consecutive points fall beyond the 2σ-limit (in zone A or beyond), on the same side of the centerline
# 2. Two of the three most recent points plot outside and on the same side as one of the 2-sigma control limits.
# The probability that any point will fall outside the warning limit is only 5%.
# The chances that two out of three points in a row fall outside the warning limit is only about 1%.
    # two or more of the last three samples beyond 2 sigma on the same side of the mean: moderate excursion
if same_side_in_a_row_n1_of_n2(queue_outof2sig,2,3) :
violated_rule_flag[1]='1'
# rule 3 Four out of five consecutive points fall beyond the 1σ-limit (in zone B or beyond), on the same side of the centerline
# 3. Four of the five most recent points plot outside and on the same side as one of the 1-sigma control limits.
# In normal processing, 68% of points fall within one sigma of the mean, and 32% fall outside it.
# The probability that 4 of 5 points fall outside of one sigma is only about 3%.
    # four or more of the last five samples beyond 1 sigma on the same side of the mean: slight excursion
if same_side_in_a_row_n1_of_n2(queue_outofsig,4,5) :
violated_rule_flag[2]='1'
# rule 4 Nine consecutive points fall on the same side of the centerline (in zone C or beyond)
# 4. Eight out of the last eight points plot on the same side of the center line, or target value.
# Sometimes you see this as 9 out of 9, or 7 out of 7. There is an equal chance that any given point will fall above or below the mean.
# The chances that a point falls on the same side of the mean as the one before it is one in two.
# The odds that the next point will also fall on the same side of the mean is one in four.
# The probability of getting eight points on the same side of the mean is only around 1%.
    # eight or more consecutive samples on the same side of the mean: a persistent bias exists
if same_side_in_a_row(queue_sign,8) :
violated_rule_flag[3]='1'
# rule 5 Six points in a row increasing or decreasing.
# The same logic is used here as for rule 4 above. Sometimes this rule is changed to seven points rising or falling.
    # six consecutive samples increasing or decreasing
if same_side_in_a_row(queue_incdec,5) :
violated_rule_flag[4]='1'
# 6. Fifteen points in a row within one sigma. In normal operation, 68% of points will fall within one sigma of the mean.
# The probability that 15 points in a row will do so, is less than 1%.
    # fifteen consecutive samples stay within 1 sigma
if same_side_in_a_row(abs(queue_dev) < stddev,15) :
violated_rule_flag[5]='1'
#7. Fourteen points in a row alternating direction.
# The chances that the second point is always higher than (or always lower than) the preceding point, for all seven pairs is only about 1%.
    # fourteen consecutive samples alternating direction
if different_side_in_a_row(queue_incdec,13) :
violated_rule_flag[6]='1'
#8. Eight points in a row outside one sigma.
# Since 68% of points lie within one sigma of the mean, the probability that eight points in a row fall outside of the one-sigma line is less than 1%.
    # eight consecutive points never fall within 1 sigma
if same_side_in_a_row(abs(queue_dev) > stddev,8) :
violated_rule_flag[7]='1'
return ''.join(violated_rule_flag)
def WE_asymm(queue,gauss) : # Ruleset 4
violated_rule_flag = list('0000000') # 7 rules
queue = pd.DataFrame(queue)
mean = float(gauss['mean'])
stddev = float(gauss['stddev'])
queue_dev = queue-mean # deviation
queue_sign = np.sign(queue_dev)
queue_diff = queue.diff() # difference
queue_incdec = np.sign(queue_diff) # is increasing or decreasing?
# rule 1 Any single data point falling above the +3σ limit
#if not (queue.iloc[len(queue)-1]< 3*stddev).all :
if (3*stddev < (queue_dev.iloc[len(queue)-1])).value :
violated_rule_flag[0]='1'
# rule 2 Two consecutive points falling above the +2σ-limit (in the upper zone A or above)
if same_side_in_a_row(queue_dev > 2*stddev,2) :
violated_rule_flag[1]='1'
# rule 3 Three consecutive points falling above the +1σ-limit (in the upper zone B or above)
if same_side_in_a_row(queue_dev > stddev,3) :
violated_rule_flag[2]='1'
# rule 4 Seven consecutive points falling above the centerline (in the upper zone C or above)
if same_side_in_a_row(queue_dev > 0 ,7) :
violated_rule_flag[3]='1'
# rule 5 Ten consecutive points falling below the centerline (in the lower zone C or below)
if same_side_in_a_row(queue_dev < 0 ,10) :
violated_rule_flag[4]='1'
# rule 6 Six consecutive points falling below the -1σ-limit (in the lower zone B or below)
if same_side_in_a_row(queue_dev < -stddev ,6) :
violated_rule_flag[5]='1'
# rule 7 Four consecutive points falling below the -2σ-limit (in the lower zone A)
if same_side_in_a_row(queue_dev < -2*stddev ,4) :
violated_rule_flag[6]='1'
return ''.join(violated_rule_flag)
def Juran(queue,gauss) : # Ruleset 5
violated_rule_flag = list('000000') # 6 rules
queue = pd.DataFrame(queue)
mean = float(gauss['mean'])
stddev = float(gauss['stddev'])
queue_dev = queue-mean # deviation
    queue_sign = np.sign(queue_dev) # sign relative to the mean
    queue_outofsig = queue_dev.applymap(lambda x : check_sign(x,stddev)) # sign relative to 1 sigma
    queue_outof2sig = queue_dev.applymap(lambda x : check_sign(x,2*stddev)) # sign relative to 2 sigma
    queue_diff = queue.diff() # difference between consecutive values
    queue_incdec = np.sign(queue_diff) # is increasing or decreasing? (sign of the difference)
if (3*stddev < abs(queue_dev.iloc[len(queue)-1])).value :
violated_rule_flag[0]='1'
    # two or more of the last three samples beyond 2 sigma on the same side of the mean
if same_side_in_a_row_n1_of_n2(queue_outof2sig,2,3) :
violated_rule_flag[1]='1'
    # four or more of the last five samples beyond 1 sigma on the same side of the mean
if same_side_in_a_row_n1_of_n2(queue_outofsig,4,5) :
violated_rule_flag[2]='1'
    # nine or more consecutive samples on the same side of the mean: a persistent bias exists
if same_side_in_a_row(queue_sign,9) :
violated_rule_flag[3]='1'
    # six or more consecutive samples increasing (or decreasing): an abnormal trend exists
if same_side_in_a_row(queue_incdec,5) :
violated_rule_flag[4]='1'
    # eight consecutive points never fall within 1 sigma
if same_side_in_a_row(abs(queue_dev) > stddev,8) :
violated_rule_flag[5]='1'
return ''.join(violated_rule_flag)
def Gitlow(queue,gauss) : #Ruleset 6
violated_rule_flag = list('00000') # 5 rules
queue = pd.DataFrame(queue)
mean = float(gauss['mean'])
stddev = float(gauss['stddev'])
queue_dev = queue-mean # deviation
    queue_sign = np.sign(queue_dev) # sign relative to the mean
    queue_outofsig = queue_dev.applymap(lambda x : check_sign(x,stddev)) # sign relative to 1 sigma
    queue_outof2sig = queue_dev.applymap(lambda x : check_sign(x,2*stddev)) # sign relative to 2 sigma
    queue_diff = queue.diff() # difference between consecutive values
    queue_incdec = np.sign(queue_diff) # is increasing or decreasing? (sign of the difference)
if (3*stddev < abs(queue_dev.iloc[len(queue)-1])).value :
violated_rule_flag[0]='1'
    # two or more of the last three samples beyond 2 sigma on the same side of the mean: moderate excursion from the allowed range
if same_side_in_a_row_n1_of_n2(queue_outof2sig,2,3) :
violated_rule_flag[1]='1'
    # four or more of the last five samples beyond 1 sigma on the same side of the mean: slight excursion from the allowed range
if same_side_in_a_row_n1_of_n2(queue_outofsig,4,5) :
violated_rule_flag[2]='1'
    # eight or more consecutive samples on the same side of the mean: a persistent bias exists
if same_side_in_a_row(queue_sign,8) :
violated_rule_flag[3]='1'
    # eight or more consecutive samples increasing (or decreasing): an abnormal trend exists
if same_side_in_a_row(queue_incdec,7) :
violated_rule_flag[4]='1'
return ''.join(violated_rule_flag)
def Duncan(queue,gauss) : #Ruleset 7
violated_rule_flag = list('0000') # 4 rules
queue = pd.DataFrame(queue)
mean = float(gauss['mean'])
stddev = float(gauss['stddev'])
queue_dev = queue-mean # deviation
    queue_outofsig = queue_dev.applymap(lambda x : check_sign(x,stddev)) # sign relative to 1 sigma
    queue_outof2sig = queue_dev.applymap(lambda x : check_sign(x,2*stddev)) # sign relative to 2 sigma
    queue_diff = queue.diff() # difference between consecutive values
    queue_incdec = np.sign(queue_diff) # is increasing or decreasing? (sign of the difference)
if (3*stddev < abs(queue_dev.iloc[len(queue)-1])).value :
violated_rule_flag[0]='1'
    # two or more of the last three samples beyond 2 sigma on the same side of the mean: moderate excursion from the allowed range
if same_side_in_a_row_n1_of_n2(queue_outof2sig,2,3) :
violated_rule_flag[1]='1'
    # four or more of the last five samples beyond 1 sigma on the same side of the mean: slight excursion from the allowed range
if same_side_in_a_row_n1_of_n2(queue_outofsig,4,5) :
violated_rule_flag[2]='1'
    # seven or more consecutive samples increasing (or decreasing): an abnormal trend exists
if same_side_in_a_row(queue_incdec,6) :
violated_rule_flag[3]='1'
return ''.join(violated_rule_flag)
def Westgard(queue,gauss) : # Ruleset 8
violated_rule_flag = list('000000') # 6 rules
    queue = pd.DataFrame(queue)
import pandas as pd
from sqlalchemy import create_engine
import sqlite3
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import bar_chart_race as bcr
import streamlit as st
import ffmpeg
import rpy2.robjects as ro
from math import pi
from rpy2.robjects import pandas2ri
from rpy2.robjects.conversion import localconverter
with st.echo(code_location="below"):
st.title('''
Spotify trends
''')
st.write('''
Добрый день, коллега. Сегодня мы будем работать с базой данных Spotify, которая лежит на kaggle.
В рамках этого дэшборда мы познакомимся с самим датасетом и попытаемся сделать какие-нибудь выводы о развитии музыки.
Было бы здорово, если ты бы сейчас открыл свой любимый музыкальный сервис, включил наушники и понаслаждался треками,
которые будут упоминаться в этом небольшом исследовании)
''')
st.write('''
Датасет слишком большой, поэтому я приложил в файл свой файл zip с датасетами. Нужно его вложить в одну папку с demo_app.py
Если хероку не сработает, то можно ввести streamlit run demo_app.py в терминал этого файла, открытый в PyCharm.
''')
st.write('''
Для начала я проведу небольшую "чистку" данных. А именно уберу лайвы от музыкантов, чтобы нам было чуть-чуть удобнее
и ничего не могло сильно испортить наши данные.
''')
spotify_track_data = pd.read_csv("tracks.csv")
spotify_track_data.head()
engine = create_engine('sqlite://', echo=False)
spotify_track_data.to_sql('tracks', con=engine)
engine.execute('''
select count (id)
from tracks
''').fetchall()
engine.execute('''
select count (id)
from tracks
where name like '%(Live%'
''').fetchall()
engine.execute('''
delete
from tracks
where name like '%(Live%'
''')
rows = engine.execute('''
select *
from tracks
''').fetchall()
spotify_track_data = pd.DataFrame(list(rows))
spotify_track_data.columns = ['index','id', 'name', 'popularity',
'duration_ms', 'explicit', 'artists',
'id_artists', 'release_date', 'danceability',
'energy', 'key', 'loudness',
'mode', 'speechiness', 'acousticness',
'instrumentalness', 'liveness', 'valence',
'tempo', 'time_signature']
spotify_track_data.artists = spotify_track_data.artists.replace('['']', np.nan)
spotify_track_data.release_date = pd.to_datetime(spotify_track_data.release_date)
import os
import sys
import subprocess
from tqdm import tqdm
from Bio.Seq import Seq
from Bio import SeqIO, SearchIO
from Bio.SeqRecord import SeqRecord
from Bio.Blast.Applications import NcbiblastpCommandline
from src.python.preprocess2 import *
from itertools import cycle
import matplotlib.pyplot as plt
from pymongo import MongoClient
from tempfile import gettempdir
tmp_dir = gettempdir()
from concurrent.futures import ThreadPoolExecutor
import argparse
ASPECT = 'F'
ONTO = None
PRIOR = None
THRESHOLDS = np.arange(.05, 1, .05)
cleanup = True
eps = 10e-6
def init_GO(asp=ASPECT, src=None):
global ONTO, ASPECT
if src: set_obo_src(src)
ASPECT = asp
ONTO = get_ontology(asp)
return ONTO
def add_arguments(parser):
parser.add_argument("--mongo_url", type=str, default='mongodb://localhost:27017/',
help="Supply the URL of MongoDB")
def load_all_data():
mf, _ = load_data(db, asp='F', codes=exp_codes)
cc, _ = load_data(db, asp='C', codes=exp_codes)
bp, _ = load_data(db, asp='P', codes=exp_codes)
return mf, cc, bp
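# The "naive" baseline ignores the query sequence entirely: every GO term is predicted
# with a probability equal to its relative frequency (prior) in the reference set.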
def _prepare_naive(reference):
global PRIOR
prior_pth = os.path.join(tmp_dir, 'prior-%s.npy' % GoAspect(ASPECT))
if os.path.exists(prior_pth):
        PRIOR = np.load(prior_pth).item()
        return
go2count = {}
for _, go_terms in reference.items():
for go in go_terms:
if go in go2count:
go2count[go] += 1
else:
go2count[go] = 1
total = len(reference)
prior = {go: count/total for go, count in go2count.items()}
np.save(prior_pth, prior)
PRIOR = prior
def _naive(target, reference):
global PRIOR
if not PRIOR:
_prepare_naive(reference)
return PRIOR
def _prepare_blast(sequences):
# print('### entering _prepare_blast')
blastdb_pth = os.path.join(tmp_dir, 'blast-%s' % GoAspect(ASPECT))
records = [SeqRecord(Seq(seq), id) for id, seq in sequences.items()]
SeqIO.write(records, open(blastdb_pth, 'w+'), "fasta")
os.system("makeblastdb -in %s -dbtype prot" % blastdb_pth)
def parallel_blast(targets, reference, num_cpu=4):
blastdb_pth = os.path.join(tmp_dir, 'blast-%s' % GoAspect(ASPECT))
records = [SeqRecord(Seq(seq), id) for id, seq in reference.items()]
SeqIO.write(records, open(blastdb_pth, 'w+'), "fasta")
os.system("makeblastdb -in %s -dbtype prot" % blastdb_pth)
predictions = dict()
e = ThreadPoolExecutor(num_cpu)
def _parallel_blast_helper(s):
return s[0], _blast(SeqRecord(Seq(s[1]), s[0]), reference, topn=None, choose_max_prob=True)
pbar = tqdm(range(len(targets)), desc="blast2go processed")
for tgtid, preds in e.map(_parallel_blast_helper, targets.items()):
predictions[tgtid] = preds
pbar.update(1)
pbar.close()
return predictions
def _blast(target_fasta, reference, topn=None, choose_max_prob=True):
seqid, asp = target_fasta.id, GoAspect(ASPECT)
query_pth = os.path.join(tmp_dir, "%s-%s.fas" % (seqid, asp))
output_pth = os.path.join(tmp_dir, "%s-%s.out" % (seqid, asp))
database_pth = os.path.join(tmp_dir, 'blast-%s' % asp)
SeqIO.write(target_fasta, open(query_pth, 'w+'), "fasta")
cline = NcbiblastpCommandline(query=query_pth, db=database_pth, out=output_pth,
outfmt=5, evalue=0.001, remote=False, ungapped=False)
child = subprocess.Popen(str(cline),
stderr=subprocess.PIPE,
universal_newlines=True,
shell=(sys.platform != "win32"))
handle, _ = child.communicate()
assert child.returncode == 0
blast_qresult = SearchIO.read(output_pth, 'blast-xml')
annotations = {}
for hsp in blast_qresult.hsps[:topn]:
if hsp.hit.id == seqid:
continue
ident = hsp.ident_num / hsp.hit_span
for go in reference[hsp.hit.id]:
if go in annotations:
annotations[go].append(ident)
else:
annotations[go] = [ident]
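    # Collapse the per-hit identity scores into one score per GO term: either the best
    # hit's identity, or a noisy-OR combination 1 - prod(1 - p) over all hits.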
for go, ps in annotations.items():
if choose_max_prob:
annotations[go] = max(ps)
else:
annotations[go] = 1 - np.prod([(1 - p) for p in ps])
if cleanup:
os.remove(query_pth)
os.remove(output_pth)
return annotations
def _predict(reference_annots, target_seqs, func_predict, binary_mode=False):
if len(target_seqs) > 1:
pbar = tqdm(range(len(target_seqs)), desc="targets processed")
else:
pbar = None
if binary_mode:
predictions = np.zeros((len(target_seqs), len(ONTO.classes)))
for i, (_, seq) in enumerate(target_seqs.items()):
preds = func_predict(seq, reference_annots)
bin_preds = ONTO.binarize([list(preds.keys())])[0]
for go, prob in preds.items():
bin_preds[ONTO[go]] = prob
predictions[i, :] = bin_preds
if pbar: pbar.update(1)
else:
predictions = {}
for _, (seqid, seq) in enumerate(target_seqs.items()):
predictions[seqid] = func_predict(SeqRecord(Seq(seq), seqid), reference_annots)
if pbar: pbar.update(1)
if pbar: pbar.close()
return predictions
def bin2dict(distribution, classes):
return {classes[i]: prob for i, prob in enumerate(distribution)}
def get_P_and_T_from_dictionaries(tau, predictions, targets):
assert len(predictions) == len(targets)
P, T = [], []
for seqid, seq_targets in targets.items():
assert len(seq_targets) > 0
seq_preds = predictions[seqid]
seq_annots = [go for go, prob in seq_preds.items() if prob >= tau]
P.append(set(seq_annots))
T.append(set(seq_targets))
assert len(P) == len(T)
return P, T
def get_P_and_T_from_arrays(tau, predictions, targets, classes):
assert len(predictions) == len(targets)
P, T = [], []
classes_arr = np.asarray(classes)
for prob_arr in map(lambda p: np.asarray(p), predictions):
annots = classes_arr[prob_arr >= tau]
P.append(set(annots))
for prob_arr in map(lambda t: np.asarray(t), targets):
annots = classes_arr[prob_arr == 1.0]
assert len(annots) == sum(prob_arr)
T.append(set(annots))
assert len(P) == len(T)
return P, T
def precision(tau, predictions, targets, classes=None):
assert type(predictions) == type(targets)
if isinstance(predictions, dict):
P, T = get_P_and_T_from_dictionaries(tau, predictions, targets)
else:
assert classes
P, T = get_P_and_T_from_arrays(tau, predictions, targets, classes)
ret = [(len(P_i & T_i) / len(P_i)) if len(P_i) else 1.0 for P_i, T_i in zip(P, T)]
return ret
def recall(tau, predictions, targets, classes=None, partial_evaluation=False):
assert type(predictions) == type(targets)
if isinstance(predictions, dict):
P, T = get_P_and_T_from_dictionaries(tau, predictions, targets)
else:
assert classes
P, T = get_P_and_T_from_arrays(tau, predictions, targets, classes)
if partial_evaluation:
P, T = zip(*[(P_i, T_i) for P_i, T_i in zip(P, T) if len(P_i) > 0])
ret = [(len(P_i & T_i) / len(T_i)) if len(P_i) else 0.0 for P_i, T_i in zip(P, T)]
return ret
def F_beta(pr, rc, beta=1):
pr = max(pr, eps)
rc = max(rc, eps)
return (1 + beta ** 2) * ((pr * rc) / (((beta ** 2) * pr) + rc))
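# The eps clamping above keeps F_beta well defined when precision or recall is exactly zero.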
def F1(pr, rc):
return F_beta(pr, rc, beta=1)
def predict(reference_seqs, reference_annots, target_seqs, method, basename=""):
filename = "%s_%s.npy" % (method, basename)
if method == "blast":
pred_path = os.path.join(tmp_dir, filename)
if basename and os.path.exists(pred_path):
return np.load(pred_path).item()
_prepare_blast(reference_seqs)
predictions = _predict(reference_annots, target_seqs, _blast)
np.save(pred_path, predictions)
return predictions
elif method == "naive":
_prepare_naive(reference_annots)
predictions = _predict(reference_annots, target_seqs, _naive)
return predictions
elif method == "deepseq":
pred_path = os.path.join(tmp_dir, filename)
return np.load(pred_path).item()
elif method == "seq2go":
pred_path = os.path.join(tmp_dir, filename)
return np.load(pred_path).item()
elif method == "seq2go-proba":
pred_path = os.path.join(tmp_dir, filename)
return np.load(pred_path).item()
else:
print("Unknown method")
def performance(predictions, ground_truth, classes=None, ths=THRESHOLDS):
prs, rcs, f1s = [], [], []
for tau in ths:
pr_per_seq = precision(tau, predictions, ground_truth, classes)
rc_per_seq = recall(tau, predictions, ground_truth, classes)
pr_tau = np.mean(pr_per_seq)
rc_tau = np.mean(rc_per_seq)
prs.append(pr_tau)
rcs.append(rc_tau)
f1s.append(np.mean(F1(pr_tau, rc_tau)))
return ths, prs, rcs, f1s
def plot_precision_recall(perf):
# Plot Precision-Recall curve
lw, n = 2, len(perf)
methods = list(perf.keys())
prs = [v[1] for v in perf.values()]
rcs = [v[2] for v in perf.values()]
f1s = [v[3] for v in perf.values()]
colors = cycle(['red', 'blue', 'navy', 'turquoise', 'darkorange', 'cornflowerblue', 'teal'])
# Plot Precision-Recall curve for each class
plt.clf()
for i, color in zip(range(len(methods)), colors):
plt.plot(rcs[i], prs[i], color=color, lw=lw,
label='{0} (F_max = {1:0.2f})'
.format(methods[i], max(f1s[i])))
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.title(GoAspect(ASPECT))
plt.legend(loc="lower right")
plt.show()
def evaluate_performance(db, methods, asp='F', train_and_validation_data=None, filename=None, plot=1):
onto = init_GO(asp)
if train_and_validation_data:
seqs_train, annots_train, seqs_valid, annots_valid = train_and_validation_data
else:
seqs_train, annots_train, seqs_valid, annots_valid = load_training_and_validation(db, None)
annots_train = propagate_labels(annots_train, onto, include_root=False)
annots_valid = propagate_labels(annots_valid, onto, include_root=False)
perf = {}
for meth in methods:
pred = predict(seqs_train, annots_train, seqs_valid, meth, filename)
perf[meth] = performance(pred, annots_valid)
if plot == 1:
plot_precision_recall(perf)
return pred, perf
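# Despite its name, product_of_experts() pools the per-method predictions per GO term and
# combines them with a noisy-OR (1 - prod(1 - p)) rather than a normalized product of densities.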
def product_of_experts(*predictions):
def go2p2go2ps(go2p_arr):
go2ps = dict()
for go2p in go2p_arr:
for go, prob in go2p.items():
if go in go2ps:
go2ps[go].append(prob)
else:
go2ps[go] = [prob]
return go2ps
poe = dict()
for pred in predictions:
for seqid, go2prob in pred.items():
if seqid in poe:
poe[seqid].append(pred[seqid])
else:
poe[seqid] = [pred[seqid]]
for seqid, arr in poe.items():
poe[seqid] = go2p2go2ps(arr)
for seqid, go2prob in poe.items():
for go, ps in go2prob.items():
poe[seqid][go] = 1 - np.prod([(1 - p) for p in ps])
return poe
if __name__ == "__main__":
parser = argparse.ArgumentParser()
add_arguments(parser)
args = parser.parse_args()
client = MongoClient(args.mongo_url)
db = client['prot2vec']
lim = 100
init_GO(ASPECT)
t0 = datetime(2017, 1, 1, 0, 0)
t1 = datetime.utcnow()
seqs_train, annots_train, seqs_valid, annots_valid = load_training_and_validation(db, t0, t1, ASPECT, lim)
predictions_blast = predict(seqs_train, annots_train, seqs_valid, "blast")
ths, prs, rcs, f1s = performance(predictions_blast, annots_valid)
import json
print(json.dumps(predictions_blast, indent=1))
print(json.dumps(annots_valid, indent=1))
import pandas as pd
    print(pd.DataFrame({"Threshold": ths, "Precision": prs, "Recall": rcs, "F1": f1s}))
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# *****************************************************************************/
# * Authors: <NAME>
# *****************************************************************************/
"""transformCSV.py
This module contains the basic functions for creating the content of a configuration file from CSV.
Args:
--inFile: Path for the configuration file where the time series data values CSV
--outFile: Path for the configuration file where the time series data values INI
--debug: Boolean flag to activate verbose printing for debug use
Example:
Default usage:
$ python transformCSV.py
Specific usage:
$ python transformCSV.py
--inFile C:\raad\src\software\time-series.csv
--outFile C:\raad\src\software\time-series.ini
--debug True
"""
import sys
import datetime
import optparse
import traceback
import pandas
import numpy
import os
import pprint
import csv
if sys.version_info.major > 2:
import configparser as cF
else:
import ConfigParser as cF
class TransformMetaData(object):
debug = False
fileName = None
fileLocation = None
columnsList = None
analysisFrameFormat = None
uniqueLists = None
analysisFrame = None
def __init__(self, inputFileName=None, debug=False, transform=False, sectionName=None, outFolder=None,
outFile='time-series-madness.ini'):
if isinstance(debug, bool):
self.debug = debug
if inputFileName is None:
return
elif os.path.exists(os.path.abspath(inputFileName)):
self.fileName = inputFileName
self.fileLocation = os.path.exists(os.path.abspath(inputFileName))
(analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList) = self.CSVtoFrame(
inputFileName=self.fileName)
self.analysisFrame = analysisFrame
self.columnsList = columnNamesList
self.analysisFrameFormat = analysisFrameFormat
self.uniqueLists = uniqueLists
if transform:
passWrite = self.frameToINI(analysisFrame=analysisFrame, sectionName=sectionName, outFolder=outFolder,
outFile=outFile)
print(f"Pass Status is : {passWrite}")
return
def getColumnList(self):
return self.columnsList
def getAnalysisFrameFormat(self):
return self.analysisFrameFormat
def getuniqueLists(self):
return self.uniqueLists
def getAnalysisFrame(self):
return self.analysisFrame
@staticmethod
def getDateParser(formatString="%Y-%m-%d %H:%M:%S.%f"):
        return (lambda x: datetime.datetime.strptime(x, formatString))  # e.g. 2020-06-09 19:14:00.000
def getHeaderFromFile(self, headerFilePath=None, method=1):
if headerFilePath is None:
return (None, None)
if method == 1:
fieldnames = pandas.read_csv(headerFilePath, index_col=0, nrows=0).columns.tolist()
elif method == 2:
with open(headerFilePath, 'r') as infile:
reader = csv.DictReader(infile)
fieldnames = list(reader.fieldnames)
elif method == 3:
fieldnames = list(pandas.read_csv(headerFilePath, nrows=1).columns)
else:
fieldnames = None
fieldDict = {}
for indexName, valueName in enumerate(fieldnames):
fieldDict[valueName] = pandas.StringDtype()
return (fieldnames, fieldDict)
def CSVtoFrame(self, inputFileName=None):
if inputFileName is None:
return (None, None)
# Load File
print("Processing File: {0}...\n".format(inputFileName))
self.fileLocation = inputFileName
# Create data frame
analysisFrame = pandas.DataFrame()
analysisFrameFormat = self._getDataFormat()
inputDataFrame = pandas.read_csv(filepath_or_buffer=inputFileName,
sep='\t',
names=self._getDataFormat(),
# dtype=self._getDataFormat()
# header=None
# float_precision='round_trip'
# engine='c',
# parse_dates=['date_column'],
# date_parser=True,
# na_values=['NULL']
)
if self.debug: # Preview data.
print(inputDataFrame.head(5))
# analysisFrame.astype(dtype=analysisFrameFormat)
# Cleanup data
analysisFrame = inputDataFrame.copy(deep=True)
analysisFrame.apply(pandas.to_numeric, errors='coerce') # Fill in bad data with Not-a-Number (NaN)
# Create lists of unique strings
uniqueLists = []
columnNamesList = []
for columnName in analysisFrame.columns:
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', analysisFrame[columnName].values)
            if pandas.api.types.is_string_dtype(analysisFrame[columnName]):
columnUniqueList = analysisFrame[columnName].unique().tolist()
else:
columnUniqueList = None
columnNamesList.append(columnName)
uniqueLists.append([columnName, columnUniqueList])
if self.debug: # Preview data.
print(analysisFrame.head(5))
return (analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList)
def frameToINI(self, analysisFrame=None, sectionName='Unknown', outFolder=None, outFile='nil.ini'):
if analysisFrame is None:
return False
try:
if outFolder is None:
outFolder = os.getcwd()
configFilePath = os.path.join(outFolder, outFile)
configINI = cF.ConfigParser()
configINI.add_section(sectionName)
            for (columnName, columnData) in analysisFrame.items():
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', columnData.values)
print("Column Contents Length:", len(columnData.values))
print("Column Contents Type", type(columnData.values))
writeList = "["
for colIndex, colValue in enumerate(columnData):
writeList = f"{writeList}'{colValue}'"
if colIndex < len(columnData) - 1:
writeList = f"{writeList}, "
writeList = f"{writeList}]"
configINI.set(sectionName, columnName, writeList)
if not os.path.exists(configFilePath) or os.stat(configFilePath).st_size == 0:
with open(configFilePath, 'w') as configWritingFile:
configINI.write(configWritingFile)
noErrors = True
except ValueError as e:
errorString = ("ERROR in {__file__} @{framePrintNo} with {ErrorFound}".format(__file__=str(__file__),
framePrintNo=str(
sys._getframe().f_lineno),
ErrorFound=e))
print(errorString)
noErrors = False
return noErrors
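    # Hypothetical API usage (the CLI entry point is shown in the module docstring):
    #   meta = TransformMetaData(inputFileName='time-series.csv', transform=True,
    #                            sectionName='TimeSeries', outFile='time-series.ini')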
@staticmethod
def _validNumericalFloat(inValue):
"""
Determines if the value is a valid numerical object.
Args:
inValue: floating-point value
Returns: Value in floating-point or Not-A-Number.
"""
try:
return numpy.float128(inValue)
except ValueError:
return numpy.nan
@staticmethod
def _calculateMean(x):
"""
        Calculates the mean using a weighted average (multiplicative form), since a direct division can produce an infinity or NaN.
Args:
x: Input data set. We use a data frame.
Returns: Calculated mean for a vector data frame.
"""
try:
mean = numpy.float128(numpy.average(x, weights=numpy.ones_like(numpy.float128(x)) / numpy.float128(x.size)))
except ValueError:
mean = 0
pass
return mean
def _calculateStd(self, data):
"""
        Calculates the standard deviation using a multiplicative form, since a direct division can produce an infinity or NaN.
Args:
data: Input data set. We use a data frame.
Returns: Calculated standard deviation for a vector data frame.
"""
sd = 0
try:
n = numpy.float128(data.size)
if n <= 1:
return numpy.float128(0.0)
# Use multiplication version of mean since numpy bug causes infinity.
mean = self._calculateMean(data)
            sd = numpy.float128(0)
# Calculate standard deviation
for el in data:
diff = numpy.float128(el) - numpy.float128(mean)
sd += (diff) ** 2
points = numpy.float128(n - 1)
sd = numpy.float128(numpy.sqrt(numpy.float128(sd) / numpy.float128(points)))
except ValueError:
pass
return sd
def _determineQuickStats(self, dataAnalysisFrame, columnName=None, multiplierSigma=3.0):
"""
Determines stats based on a vector to get the data shape.
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
multiplierSigma: Sigma range for the stats.
Returns: Set of stats.
"""
meanValue = 0
sigmaValue = 0
sigmaRangeValue = 0
topValue = 0
try:
# Clean out anomoly due to random invalid inputs.
if (columnName is not None):
meanValue = self._calculateMean(dataAnalysisFrame[columnName])
                if numpy.isnan(meanValue):
                    meanValue = numpy.float128(1)
                sigmaValue = self._calculateStd(dataAnalysisFrame[columnName])
                if numpy.isnan(sigmaValue):
                    sigmaValue = numpy.float128(1)
                multiplier = numpy.float128(multiplierSigma)  # Stats: 1 sigma = 68%, 2 sigma = 95%, 3 sigma = 99.7
                sigmaRangeValue = (sigmaValue * multiplier)
                if numpy.isnan(sigmaRangeValue):
                    sigmaRangeValue = numpy.float128(1)
topValue = numpy.float128(meanValue + sigmaRangeValue)
print("Name:{} Mean= {}, Sigma= {}, {}*Sigma= {}".format(columnName,
meanValue,
sigmaValue,
multiplier,
sigmaRangeValue))
except ValueError:
pass
return (meanValue, sigmaValue, sigmaRangeValue, topValue)
def _cleanZerosForColumnInFrame(self, dataAnalysisFrame, columnName='cycles'):
"""
Cleans the data frame with data values that are invalid. I.E. inf, NaN
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
Returns: Cleaned dataframe.
"""
dataAnalysisCleaned = None
try:
# Clean out anomoly due to random invalid inputs.
(meanValue, sigmaValue, sigmaRangeValue, topValue) = self._determineQuickStats(
dataAnalysisFrame=dataAnalysisFrame, columnName=columnName)
# dataAnalysisCleaned = dataAnalysisFrame[dataAnalysisFrame[columnName] != 0]
# When the cycles are negative or zero we missed cleaning up a row.
# logicVector = (dataAnalysisFrame[columnName] != 0)
# dataAnalysisCleaned = dataAnalysisFrame[logicVector]
            logicVector = (dataAnalysisFrame[columnName] >= 1)
            dataAnalysisCleaned = dataAnalysisFrame[logicVector]
# These timed out mean + 2 * sd
logicVector = (dataAnalysisCleaned[columnName] < topValue) # Data range
dataAnalysisCleaned = dataAnalysisCleaned[logicVector]
except ValueError:
pass
return dataAnalysisCleaned
def _cleanFrame(self, dataAnalysisTemp, cleanColumn=False, columnName='cycles'):
"""
Args:
dataAnalysisTemp: Dataframe to do analysis on.
cleanColumn: Flag to clean the data frame.
columnName: Column name of the data frame.
Returns: cleaned dataframe
"""
try:
replacementList = [pandas.NaT, numpy.Infinity, numpy.NINF, 'NaN', 'inf', '-inf', 'NULL']
if cleanColumn is True:
dataAnalysisTemp = self._cleanZerosForColumnInFrame(dataAnalysisTemp, columnName=columnName)
dataAnalysisTemp = dataAnalysisTemp.replace(to_replace=replacementList,
value=numpy.nan)
dataAnalysisTemp = dataAnalysisTemp.dropna()
except ValueError:
pass
return dataAnalysisTemp
@staticmethod
def _getDataFormat():
"""
Return the dataframe setup for the CSV file generated from server.
Returns: dictionary data format for pandas.
"""
dataFormat = {
"Serial_Number": pandas.StringDtype(),
"LogTime0": pandas.StringDtype(), # @todo force rename
"Id0": pandas.StringDtype(), # @todo force rename
"DriveId": pandas.StringDtype(),
"JobRunId": pandas.StringDtype(),
"LogTime1": pandas.StringDtype(), # @todo force rename
"Comment0": pandas.StringDtype(), # @todo force rename
"CriticalWarning": pandas.StringDtype(),
"Temperature": pandas.StringDtype(),
"AvailableSpare": pandas.StringDtype(),
"AvailableSpareThreshold": pandas.StringDtype(),
"PercentageUsed": pandas.StringDtype(),
"DataUnitsReadL": pandas.StringDtype(),
"DataUnitsReadU": pandas.StringDtype(),
"DataUnitsWrittenL": pandas.StringDtype(),
"DataUnitsWrittenU": pandas.StringDtype(),
"HostReadCommandsL": pandas.StringDtype(),
"HostReadCommandsU": pandas.StringDtype(),
"HostWriteCommandsL": pandas.StringDtype(),
"HostWriteCommandsU": pandas.StringDtype(),
"ControllerBusyTimeL": pandas.StringDtype(),
"ControllerBusyTimeU": pandas.StringDtype(),
"PowerCyclesL": pandas.StringDtype(),
"PowerCyclesU": pandas.StringDtype(),
"PowerOnHoursL": pandas.StringDtype(),
"PowerOnHoursU": pandas.StringDtype(),
"UnsafeShutdownsL": pandas.StringDtype(),
"UnsafeShutdownsU": pandas.StringDtype(),
"MediaErrorsL": pandas.StringDtype(),
"MediaErrorsU": pandas.StringDtype(),
"NumErrorInfoLogsL": pandas.StringDtype(),
"NumErrorInfoLogsU": pandas.StringDtype(),
"ProgramFailCountN": pandas.StringDtype(),
"ProgramFailCountR": pandas.StringDtype(),
"EraseFailCountN": pandas.StringDtype(),
"EraseFailCountR": pandas.StringDtype(),
"WearLevelingCountN": pandas.StringDtype(),
"WearLevelingCountR": pandas.StringDtype(),
"E2EErrorDetectCountN": pandas.StringDtype(),
"E2EErrorDetectCountR": pandas.StringDtype(),
"CRCErrorCountN": pandas.StringDtype(),
"CRCErrorCountR": pandas.StringDtype(),
"MediaWearPercentageN": pandas.StringDtype(),
"MediaWearPercentageR": pandas.StringDtype(),
"HostReadsN": pandas.StringDtype(),
"HostReadsR": pandas.StringDtype(),
"TimedWorkloadN": pandas.StringDtype(),
"TimedWorkloadR": pandas.StringDtype(),
"ThermalThrottleStatusN": pandas.StringDtype(),
"ThermalThrottleStatusR": pandas.StringDtype(),
"RetryBuffOverflowCountN": pandas.StringDtype(),
"RetryBuffOverflowCountR": pandas.StringDtype(),
"PLLLockLossCounterN": pandas.StringDtype(),
"PLLLockLossCounterR": pandas.StringDtype(),
"NandBytesWrittenN": pandas.StringDtype(),
"NandBytesWrittenR": pandas.StringDtype(),
"HostBytesWrittenN": pandas.StringDtype(),
"HostBytesWrittenR": pandas.StringDtype(),
"SystemAreaLifeRemainingN": pandas.StringDtype(),
"SystemAreaLifeRemainingR": pandas.StringDtype(),
"RelocatableSectorCountN": pandas.StringDtype(),
"RelocatableSectorCountR": pandas.StringDtype(),
"SoftECCErrorRateN": pandas.StringDtype(),
"SoftECCErrorRateR": pandas.StringDtype(),
"UnexpectedPowerLossN": pandas.StringDtype(),
"UnexpectedPowerLossR": pandas.StringDtype(),
"MediaErrorCountN": pandas.StringDtype(),
"MediaErrorCountR": pandas.StringDtype(),
"NandBytesReadN": pandas.StringDtype(),
"NandBytesReadR": pandas.StringDtype(),
"WarningCompTempTime": pandas.StringDtype(),
"CriticalCompTempTime": pandas.StringDtype(),
"TempSensor1": pandas.StringDtype(),
"TempSensor2": pandas.StringDtype(),
"TempSensor3": pandas.StringDtype(),
"TempSensor4": pandas.StringDtype(),
"TempSensor5": pandas.StringDtype(),
"TempSensor6": pandas.StringDtype(),
"TempSensor7": pandas.StringDtype(),
"TempSensor8": pandas.StringDtype(),
"ThermalManagementTemp1TransitionCount": pandas.StringDtype(),
"ThermalManagementTemp2TransitionCount": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp1": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp2": pandas.StringDtype(),
"Core_Num": pandas.StringDtype(),
"Id1": pandas.StringDtype(), # @todo force rename
"Job_Run_Id": pandas.StringDtype(),
"Stats_Time": pandas.StringDtype(),
"HostReads": pandas.StringDtype(),
"HostWrites": pandas.StringDtype(),
"NandReads": pandas.StringDtype(),
"NandWrites": pandas.StringDtype(),
"ProgramErrors": pandas.StringDtype(),
"EraseErrors": pandas.StringDtype(),
"ErrorCount": pandas.StringDtype(),
"BitErrorsHost1": pandas.StringDtype(),
"BitErrorsHost2": pandas.StringDtype(),
"BitErrorsHost3": pandas.StringDtype(),
"BitErrorsHost4": pandas.StringDtype(),
"BitErrorsHost5": pandas.StringDtype(),
"BitErrorsHost6": pandas.StringDtype(),
"BitErrorsHost7": pandas.StringDtype(),
"BitErrorsHost8": pandas.StringDtype(),
"BitErrorsHost9": pandas.StringDtype(),
"BitErrorsHost10": pandas.StringDtype(),
"BitErrorsHost11": pandas.StringDtype(),
"BitErrorsHost12": pandas.StringDtype(),
"BitErrorsHost13": pandas.StringDtype(),
"BitErrorsHost14": pandas.StringDtype(),
"BitErrorsHost15": pandas.StringDtype(),
"ECCFail": pandas.StringDtype(),
"GrownDefects": pandas.StringDtype(),
"FreeMemory": pandas.StringDtype(),
"WriteAllowance": pandas.StringDtype(),
"ModelString": pandas.StringDtype(),
"ValidBlocks": pandas.StringDtype(),
"TokenBlocks": pandas.StringDtype(),
"SpuriousPFCount": pandas.StringDtype(),
"SpuriousPFLocations1": pandas.StringDtype(),
"SpuriousPFLocations2": pandas.StringDtype(),
"SpuriousPFLocations3": pandas.StringDtype(),
"SpuriousPFLocations4": pandas.StringDtype(),
"SpuriousPFLocations5": pandas.StringDtype(),
"SpuriousPFLocations6": pandas.StringDtype(),
"SpuriousPFLocations7": pandas.StringDtype(),
"SpuriousPFLocations8": pandas.StringDtype(),
"BitErrorsNonHost1": pandas.StringDtype(),
"BitErrorsNonHost2": pandas.StringDtype(),
"BitErrorsNonHost3": pandas.StringDtype(),
"BitErrorsNonHost4": pandas.StringDtype(),
"BitErrorsNonHost5": pandas.StringDtype(),
"BitErrorsNonHost6": pandas.StringDtype(),
"BitErrorsNonHost7": pandas.StringDtype(),
"BitErrorsNonHost8": pandas.StringDtype(),
"BitErrorsNonHost9": pandas.StringDtype(),
"BitErrorsNonHost10": pandas.StringDtype(),
"BitErrorsNonHost11": pandas.StringDtype(),
"BitErrorsNonHost12": pandas.StringDtype(),
"BitErrorsNonHost13": pandas.StringDtype(),
"BitErrorsNonHost14": pandas.StringDtype(),
"BitErrorsNonHost15": pandas.StringDtype(),
"ECCFailNonHost": pandas.StringDtype(),
"NSversion": pandas.StringDtype(),
"numBands": pandas.StringDtype(),
"minErase": pandas.StringDtype(),
"maxErase": pandas.StringDtype(),
"avgErase": pandas.StringDtype(),
"minMVolt": pandas.StringDtype(),
"maxMVolt": pandas.StringDtype(),
"avgMVolt": pandas.StringDtype(),
"minMAmp": pandas.StringDtype(),
"maxMAmp": pandas.StringDtype(),
"avgMAmp": pandas.StringDtype(),
"comment1": pandas.StringDtype(), # @todo force rename
"minMVolt12v": pandas.StringDtype(),
"maxMVolt12v": pandas.StringDtype(),
"avgMVolt12v": pandas.StringDtype(),
"minMAmp12v": pandas.StringDtype(),
"maxMAmp12v": pandas.StringDtype(),
"avgMAmp12v": pandas.StringDtype(),
"nearMissSector": pandas.StringDtype(),
"nearMissDefect": pandas.StringDtype(),
"nearMissOverflow": pandas.StringDtype(),
"replayUNC": pandas.StringDtype(),
"Drive_Id": pandas.StringDtype(),
"indirectionMisses": pandas.StringDtype(),
"BitErrorsHost16": pandas.StringDtype(),
"BitErrorsHost17": pandas.StringDtype(),
"BitErrorsHost18": pandas.StringDtype(),
"BitErrorsHost19": pandas.StringDtype(),
"BitErrorsHost20": pandas.StringDtype(),
"BitErrorsHost21": pandas.StringDtype(),
"BitErrorsHost22": pandas.StringDtype(),
"BitErrorsHost23": pandas.StringDtype(),
"BitErrorsHost24": pandas.StringDtype(),
"BitErrorsHost25": pandas.StringDtype(),
"BitErrorsHost26": pandas.StringDtype(),
"BitErrorsHost27": pandas.StringDtype(),
"BitErrorsHost28": pandas.StringDtype(),
"BitErrorsHost29": pandas.StringDtype(),
"BitErrorsHost30": pandas.StringDtype(),
"BitErrorsHost31": pandas.StringDtype(),
"BitErrorsHost32": pandas.StringDtype(),
"BitErrorsHost33": pandas.StringDtype(),
"BitErrorsHost34": pandas.StringDtype(),
"BitErrorsHost35": pandas.StringDtype(),
"BitErrorsHost36": pandas.StringDtype(),
"BitErrorsHost37": pandas.StringDtype(),
"BitErrorsHost38": pandas.StringDtype(),
"BitErrorsHost39": pandas.StringDtype(),
"BitErrorsHost40": pandas.StringDtype(),
"XORRebuildSuccess": pandas.StringDtype(),
"XORRebuildFail": pandas.StringDtype(),
"BandReloForError": pandas.StringDtype(),
"mrrSuccess": pandas.StringDtype(),
"mrrFail": pandas.StringDtype(),
"mrrNudgeSuccess": pandas.StringDtype(),
"mrrNudgeHarmless": pandas.StringDtype(),
"mrrNudgeFail": pandas.StringDtype(),
"totalErases": pandas.StringDtype(),
"dieOfflineCount": pandas.StringDtype(),
"curtemp": pandas.StringDtype(),
"mintemp": pandas.StringDtype(),
"maxtemp": pandas.StringDtype(),
"oventemp": pandas.StringDtype(),
"allZeroSectors": pandas.StringDtype(),
"ctxRecoveryEvents": pandas.StringDtype(),
"ctxRecoveryErases": pandas.StringDtype(),
"NSversionMinor": pandas.StringDtype(),
"lifeMinTemp": pandas.StringDtype(),
"lifeMaxTemp": pandas.StringDtype(),
"powerCycles": pandas.StringDtype(),
"systemReads": pandas.StringDtype(),
"systemWrites": pandas.StringDtype(),
"readRetryOverflow": pandas.StringDtype(),
"unplannedPowerCycles": pandas.StringDtype(),
"unsafeShutdowns": pandas.StringDtype(),
"defragForcedReloCount": pandas.StringDtype(),
"bandReloForBDR": pandas.StringDtype(),
"bandReloForDieOffline": pandas.StringDtype(),
"bandReloForPFail": pandas.StringDtype(),
"bandReloForWL": pandas.StringDtype(),
"provisionalDefects": pandas.StringDtype(),
"uncorrectableProgErrors": pandas.StringDtype(),
"powerOnSeconds": pandas.StringDtype(),
"bandReloForChannelTimeout": pandas.StringDtype(),
"fwDowngradeCount": pandas.StringDtype(),
"dramCorrectablesTotal": pandas.StringDtype(),
"hb_id": pandas.StringDtype(),
"dramCorrectables1to1": pandas.StringDtype(),
"dramCorrectables4to1": pandas.StringDtype(),
"dramCorrectablesSram": pandas.StringDtype(),
"dramCorrectablesUnknown": pandas.StringDtype(),
"pliCapTestInterval": pandas.StringDtype(),
"pliCapTestCount": pandas.StringDtype(),
"pliCapTestResult": pandas.StringDtype(),
"pliCapTestTimeStamp": pandas.StringDtype(),
"channelHangSuccess": pandas.StringDtype(),
"channelHangFail": pandas.StringDtype(),
"BitErrorsHost41": pandas.StringDtype(),
"BitErrorsHost42": pandas.StringDtype(),
"BitErrorsHost43": pandas.StringDtype(),
"BitErrorsHost44": pandas.StringDtype(),
"BitErrorsHost45": pandas.StringDtype(),
"BitErrorsHost46": pandas.StringDtype(),
"BitErrorsHost47": pandas.StringDtype(),
"BitErrorsHost48": pandas.StringDtype(),
"BitErrorsHost49": pandas.StringDtype(),
"BitErrorsHost50": pandas.StringDtype(),
"BitErrorsHost51": pandas.StringDtype(),
"BitErrorsHost52": pandas.StringDtype(),
"BitErrorsHost53": pandas.StringDtype(),
"BitErrorsHost54": pandas.StringDtype(),
"BitErrorsHost55": pandas.StringDtype(),
"BitErrorsHost56": pandas.StringDtype(),
"mrrNearMiss": pandas.StringDtype(),
"mrrRereadAvg": pandas.StringDtype(),
"readDisturbEvictions": pandas.StringDtype(),
"L1L2ParityError": pandas.StringDtype(),
"pageDefects": pandas.StringDtype(),
"pageProvisionalTotal": pandas.StringDtype(),
"ASICTemp": pandas.StringDtype(),
"PMICTemp": pandas.StringDtype(),
"size": pandas.StringDtype(),
"lastWrite": pandas.StringDtype(),
"timesWritten": pandas.StringDtype(),
"maxNumContextBands": pandas.StringDtype(),
"blankCount": pandas.StringDtype(),
"cleanBands": pandas.StringDtype(),
"avgTprog": pandas.StringDtype(),
"avgEraseCount": pandas.StringDtype(),
"edtcHandledBandCnt": pandas.StringDtype(),
"bandReloForNLBA": pandas.StringDtype(),
"bandCrossingDuringPliCount": pandas.StringDtype(),
"bitErrBucketNum": pandas.StringDtype(),
"sramCorrectablesTotal": pandas.StringDtype(),
"l1SramCorrErrCnt": pandas.StringDtype(),
"l2SramCorrErrCnt": pandas.StringDtype(),
"parityErrorValue": pandas.StringDtype(),
"parityErrorType": pandas.StringDtype(),
"mrr_LutValidDataSize": pandas.StringDtype(),
"pageProvisionalDefects": pandas.StringDtype(),
"plisWithErasesInProgress": pandas.StringDtype(),
"lastReplayDebug": pandas.StringDtype(),
"externalPreReadFatals": pandas.StringDtype(),
"hostReadCmd": pandas.StringDtype(),
"hostWriteCmd": pandas.StringDtype(),
"trimmedSectors": pandas.StringDtype(),
"trimTokens": pandas.StringDtype(),
"mrrEventsInCodewords": pandas.StringDtype(),
"mrrEventsInSectors": pandas.StringDtype(),
"powerOnMicroseconds": pandas.StringDtype(),
"mrrInXorRecEvents": pandas.StringDtype(),
"mrrFailInXorRecEvents": pandas.StringDtype(),
"mrrUpperpageEvents": pandas.StringDtype(),
"mrrLowerpageEvents": pandas.StringDtype(),
"mrrSlcpageEvents": pandas.StringDtype(),
"mrrReReadTotal": pandas.StringDtype(),
"powerOnResets": pandas.StringDtype(),
"powerOnMinutes": pandas.StringDtype(),
"throttleOnMilliseconds": pandas.StringDtype(),
"ctxTailMagic": pandas.StringDtype(),
"contextDropCount": pandas.StringDtype(),
"lastCtxSequenceId": pandas.StringDtype(),
"currCtxSequenceId": pandas.StringDtype(),
"mbliEraseCount": pandas.StringDtype(),
"pageAverageProgramCount": pandas.StringDtype(),
"bandAverageEraseCount": pandas.StringDtype(),
"bandTotalEraseCount": pandas.StringDtype(),
"bandReloForXorRebuildFail": pandas.StringDtype(),
"defragSpeculativeMiss": pandas.StringDtype(),
"uncorrectableBackgroundScan": pandas.StringDtype(),
"BitErrorsHost57": pandas.StringDtype(),
"BitErrorsHost58": pandas.StringDtype(),
"BitErrorsHost59": pandas.StringDtype(),
"BitErrorsHost60": pandas.StringDtype(),
"BitErrorsHost61": pandas.StringDtype(),
"BitErrorsHost62": pandas.StringDtype(),
"BitErrorsHost63": pandas.StringDtype(),
"BitErrorsHost64": pandas.StringDtype(),
"BitErrorsHost65": pandas.StringDtype(),
"BitErrorsHost66": pandas.StringDtype(),
"BitErrorsHost67": pandas.StringDtype(),
"BitErrorsHost68": pandas.StringDtype(),
"BitErrorsHost69": pandas.StringDtype(),
"BitErrorsHost70": pandas.StringDtype(),
"BitErrorsHost71": pandas.StringDtype(),
"BitErrorsHost72": pandas.StringDtype(),
"BitErrorsHost73": pandas.StringDtype(),
"BitErrorsHost74": pandas.StringDtype(),
"BitErrorsHost75": pandas.StringDtype(),
"BitErrorsHost76": pandas.StringDtype(),
"BitErrorsHost77": pandas.StringDtype(),
"BitErrorsHost78": pandas.StringDtype(),
"BitErrorsHost79": pandas.StringDtype(),
"BitErrorsHost80": pandas.StringDtype(),
"bitErrBucketArray1": pandas.StringDtype(),
"bitErrBucketArray2": pandas.StringDtype(),
"bitErrBucketArray3": pandas.StringDtype(),
"bitErrBucketArray4": pandas.StringDtype(),
"bitErrBucketArray5": pandas.StringDtype(),
"bitErrBucketArray6": pandas.StringDtype(),
"bitErrBucketArray7": pandas.StringDtype(),
"bitErrBucketArray8": pandas.StringDtype(),
"bitErrBucketArray9": pandas.StringDtype(),
"bitErrBucketArray10": pandas.StringDtype(),
"bitErrBucketArray11": pandas.StringDtype(),
"bitErrBucketArray12": pandas.StringDtype(),
"bitErrBucketArray13": pandas.StringDtype(),
"bitErrBucketArray14": pandas.StringDtype(),
"bitErrBucketArray15": pandas.StringDtype(),
"bitErrBucketArray16": pandas.StringDtype(),
"bitErrBucketArray17": pandas.StringDtype(),
"bitErrBucketArray18": pandas.StringDtype(),
"bitErrBucketArray19": pandas.StringDtype(),
"bitErrBucketArray20": pandas.StringDtype(),
"bitErrBucketArray21": pandas.StringDtype(),
"bitErrBucketArray22": pandas.StringDtype(),
"bitErrBucketArray23": pandas.StringDtype(),
"bitErrBucketArray24": pandas.StringDtype(),
"bitErrBucketArray25": pandas.StringDtype(),
"bitErrBucketArray26": pandas.StringDtype(),
"bitErrBucketArray27": pandas.StringDtype(),
"bitErrBucketArray28": pandas.StringDtype(),
"bitErrBucketArray29": pandas.StringDtype(),
"bitErrBucketArray30": pandas.StringDtype(),
"bitErrBucketArray31": pandas.StringDtype(),
"bitErrBucketArray32": pandas.StringDtype(),
"bitErrBucketArray33": pandas.StringDtype(),
"bitErrBucketArray34": pandas.StringDtype(),
"bitErrBucketArray35": pandas.StringDtype(),
"bitErrBucketArray36": pandas.StringDtype(),
"bitErrBucketArray37": pandas.StringDtype(),
"bitErrBucketArray38": pandas.StringDtype(),
"bitErrBucketArray39": pandas.StringDtype(),
"bitErrBucketArray40": pandas.StringDtype(),
"bitErrBucketArray41": pandas.StringDtype(),
"bitErrBucketArray42": pandas.StringDtype(),
"bitErrBucketArray43": pandas.StringDtype(),
"bitErrBucketArray44": pandas.StringDtype(),
"bitErrBucketArray45": pandas.StringDtype(),
"bitErrBucketArray46": pandas.StringDtype(),
"bitErrBucketArray47": pandas.StringDtype(),
"bitErrBucketArray48": pandas.StringDtype(),
"bitErrBucketArray49": pandas.StringDtype(),
"bitErrBucketArray50": pandas.StringDtype(),
"bitErrBucketArray51": pandas.StringDtype(),
"bitErrBucketArray52": pandas.StringDtype(),
"bitErrBucketArray53": pandas.StringDtype(),
"bitErrBucketArray54": pandas.StringDtype(),
"bitErrBucketArray55": pandas.StringDtype(),
"bitErrBucketArray56": pandas.StringDtype(),
"bitErrBucketArray57": pandas.StringDtype(),
"bitErrBucketArray58": pandas.StringDtype(),
"bitErrBucketArray59": pandas.StringDtype(),
"bitErrBucketArray60": pandas.StringDtype(),
"bitErrBucketArray61": pandas.StringDtype(),
"bitErrBucketArray62": pandas.StringDtype(),
"bitErrBucketArray63": pandas.StringDtype(),
"bitErrBucketArray64": pandas.StringDtype(),
"bitErrBucketArray65": pandas.StringDtype(),
"bitErrBucketArray66": pandas.StringDtype(),
"bitErrBucketArray67": pandas.StringDtype(),
"bitErrBucketArray68": pandas.StringDtype(),
"bitErrBucketArray69": pandas.StringDtype(),
"bitErrBucketArray70": pandas.StringDtype(),
"bitErrBucketArray71": pandas.StringDtype(),
"bitErrBucketArray72": pandas.StringDtype(),
"bitErrBucketArray73": pandas.StringDtype(),
"bitErrBucketArray74": pandas.StringDtype(),
"bitErrBucketArray75": pandas.StringDtype(),
"bitErrBucketArray76": pandas.StringDtype(),
"bitErrBucketArray77": pandas.StringDtype(),
"bitErrBucketArray78": pandas.StringDtype(),
"bitErrBucketArray79": pandas.StringDtype(),
"bitErrBucketArray80": pandas.StringDtype(),
"mrr_successDistribution1": pandas.StringDtype(),
"mrr_successDistribution2": pandas.StringDtype(),
"mrr_successDistribution3": pandas.StringDtype(),
"mrr_successDistribution4": pandas.StringDtype(),
"mrr_successDistribution5": pandas.StringDtype(),
"mrr_successDistribution6": pandas.StringDtype(),
"mrr_successDistribution7": pandas.StringDtype(),
"mrr_successDistribution8": pandas.StringDtype(),
"mrr_successDistribution9": pandas.StringDtype(),
"mrr_successDistribution10": pandas.StringDtype(),
"mrr_successDistribution11": pandas.StringDtype(),
"mrr_successDistribution12": pandas.StringDtype(),
"mrr_successDistribution13": pandas.StringDtype(),
"mrr_successDistribution14": pandas.StringDtype(),
"mrr_successDistribution15": pandas.StringDtype(),
"mrr_successDistribution16": pandas.StringDtype(),
"mrr_successDistribution17": pandas.StringDtype(),
"mrr_successDistribution18": pandas.StringDtype(),
"mrr_successDistribution19": pandas.StringDtype(),
"mrr_successDistribution20": pandas.StringDtype(),
"mrr_successDistribution21": pandas.StringDtype(),
"mrr_successDistribution22": pandas.StringDtype(),
"mrr_successDistribution23": pandas.StringDtype(),
"mrr_successDistribution24": pandas.StringDtype(),
"mrr_successDistribution25": pandas.StringDtype(),
"mrr_successDistribution26": pandas.StringDtype(),
"mrr_successDistribution27": pandas.StringDtype(),
"mrr_successDistribution28": pandas.StringDtype(),
"mrr_successDistribution29": pandas.StringDtype(),
"mrr_successDistribution30": pandas.StringDtype(),
"mrr_successDistribution31": pandas.StringDtype(),
"mrr_successDistribution32": pandas.StringDtype(),
"mrr_successDistribution33": pandas.StringDtype(),
"mrr_successDistribution34": pandas.StringDtype(),
"mrr_successDistribution35": pandas.StringDtype(),
"mrr_successDistribution36": pandas.StringDtype(),
"mrr_successDistribution37": pandas.StringDtype(),
"mrr_successDistribution38": pandas.StringDtype(),
"mrr_successDistribution39": pandas.StringDtype(),
"mrr_successDistribution40": pandas.StringDtype(),
"mrr_successDistribution41": pandas.StringDtype(),
"mrr_successDistribution42": pandas.StringDtype(),
"mrr_successDistribution43": pandas.StringDtype(),
"mrr_successDistribution44": pandas.StringDtype(),
"mrr_successDistribution45": pandas.StringDtype(),
"mrr_successDistribution46": pandas.StringDtype(),
"mrr_successDistribution47": pandas.StringDtype(),
"mrr_successDistribution48": pandas.StringDtype(),
"mrr_successDistribution49": pandas.StringDtype(),
"mrr_successDistribution50": pandas.StringDtype(),
"mrr_successDistribution51": pandas.StringDtype(),
"mrr_successDistribution52": pandas.StringDtype(),
"mrr_successDistribution53": pandas.StringDtype(),
"mrr_successDistribution54": pandas.StringDtype(),
"mrr_successDistribution55": pandas.StringDtype(),
"mrr_successDistribution56": pandas.StringDtype(),
"mrr_successDistribution57": pandas.StringDtype(),
"mrr_successDistribution58": pandas.StringDtype(),
"mrr_successDistribution59": pandas.StringDtype(),
"mrr_successDistribution60": pandas.StringDtype(),
"mrr_successDistribution61": pandas.StringDtype(),
"mrr_successDistribution62": pandas.StringDtype(),
"mrr_successDistribution63": pandas.StringDtype(),
"mrr_successDistribution64": pandas.StringDtype(),
"blDowngradeCount": pandas.StringDtype(),
"snapReads": pandas.StringDtype(),
"pliCapTestTime": pandas.StringDtype(),
"currentTimeToFreeSpaceRecovery": pandas.StringDtype(),
"worstTimeToFreeSpaceRecovery": pandas.StringDtype(),
"rspnandReads": pandas.StringDtype(),
"cachednandReads": pandas.StringDtype(),
"spnandReads": | pandas.StringDtype() | pandas.StringDtype |
import numpy as np
import pandas as pd
from sklearn.model_selection import KFold
import matchzoo
from .dataset import *
class MLDataSet:
def fold(self, fold : int):
train_relation = []
for index in self.train_index_list[fold]:
train_relation.append(self.relation[index])
train_pack = self.build_datapack(train_relation)
test_relation = []
for index in self.test_index_list[fold]:
test_relation.append(self.relation[index])
test_pack = self.build_datapack(test_relation)
return train_pack, test_pack
def build_datapack(self, relation):
relation_df = pd.DataFrame(relation, columns=['id_left', 'id_right', 'label'])
qid_set = set()
docno_set = set()
for rel in relation:
qid_set.add(rel[0])
docno_set.add(rel[1])
left = []
for qid, query in self.dataset.query_set.queries.items():
if str(qid) in qid_set:
left.append([str(qid), query.querystr])
left_df = pd.DataFrame(left, columns=['id_left', 'text_left'])
left_df.set_index('id_left', inplace=True)
right = []
N = self.dataset.collection.N
for doc_id in range(N):
doc_no = self.dataset.collection.docno(doc_id)
if doc_no in docno_set:
text = self.dataset.collection.doc(doc_id)
text = text[0:100]
right.append([doc_no, text])
right_df = | pd.DataFrame(right, columns=['id_right', 'text_right']) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import pandas as pd
from scipy.spatial import distance
from sklearn.cluster import KMeans
from sklearn.mixture import GaussianMixture
from yellowbrick.cluster import KElbowVisualizer
def findCluster(X):
""" Find clusters using GaussianMixture
Call getElbow to find out the best number of clusters, and then use that to form clusters.
Parameters
----------
X : numpy array
Features of training dataset
Returns
-------
gmm : GaussianMixture()
The trained cluster that can be used to predict which cluster an observation belongs to.
"""
n_clusters = getElbow(X)
gmm = GaussianMixture(n_components=n_clusters, random_state=8)
gmm.fit(X)
return gmm
def getElbow(X):
""" Use Elbow approach to find best number of clusters
Call yellowbrick.cluster.KElbowVisualizer to find out the best number of clusters.
Parameters
----------
X : numpy array
Features of training dataset
Returns
-------
visualizer.elbow_value_ : int
The best number of clusters based on the passed in dataset
"""
model = KMeans(random_state=8)
visualizer = KElbowVisualizer(model, k=(4,12))
visualizer.fit(X)
return visualizer.elbow_value_
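# Illustrative usage sketch (added for clarity; not part of the original
# module). It shows how getElbow and findCluster are meant to be combined on a
# feature matrix. The synthetic blob data below is purely an assumption made
# for this example.
def _example_find_cluster():
    import numpy as np
    rng = np.random.RandomState(8)
    centers = [(0, 0), (6, 6), (12, 0), (0, 12), (12, 12), (6, -6)]
    # six well-separated synthetic blobs as a stand-in for real features
    X = np.vstack([rng.normal(loc=c, scale=0.3, size=(50, 2)) for c in centers])
    gmm = findCluster(X)      # internally calls getElbow to pick n_components
    labels = gmm.predict(X)   # cluster assignment for each observation
    return gmm, labels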
def addClusterFeatures(gmm, X, columns):
""" Add cluster grouping feature and the distance to the cluster centre feature to dataframe
Parameters
----------
gmm : GaussianMixture()
The trained cluster that can be used to predict which cluster an observation belongs to.
X : numpy array
Features of training dataset
columns : List
The list of column names to restructure the dataframe for X
Returns
-------
df : DataFrame
The reconstructed DataFrame from passed in numpy array plus features cluster and distance_to_cluster_mean
"""
means = gmm.means_
df = | pd.DataFrame(X, columns=columns) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 19 22:33:01 2019
@author: BCJuan
Functions for the pipeline devoted to creating the full dataset.
All the files (.csv) must be in a folder named data. The structure is the same
as the zip downloaded from http://climate-challenge.herokuapp.com/data/.
However there is a new folder named examples which has the sample
submission files.
All the data is saved in data (if stated so in args).
So the file should only be called once.
However, you can also call this file from another file without saving.
"""
from sklearn.base import BaseEstimator, TransformerMixin
from os import path, listdir, mkdir
import pandas as pd
import urllib.request as req
from zipfile import ZipFile
from io import BytesIO
import geopy.distance as distance
from tqdm import tqdm as tqdm
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
OFFICIAL_ATTR = [['DATA', 'Tm', 'Tx', 'Tn', 'PPT24h', 'HRm',
'hPa', 'RS24h', 'VVem6', 'DVum6', 'VVx6', 'DVx6', 'ESTACIO'],
['DATA', 'Tm', 'Tx', 'Tn', 'HRm', 'ESTACIO'],
['DATA', 'Tm', 'Tx', 'Tn', 'PPT24h', 'HRm',
'hPa', 'RS24h', 'VVem10', 'DVum10', 'VVx10', 'DVx10',
'ESTACIO'],
['DATA', 'Tm', 'Tx', 'Tn', 'PPT24h', 'HRm',
'hPa', 'RS24h', 'VVem10', 'DVum10', 'VVx10', 'DVx10',
'ESTACIO']]
OFFICIAL_ATTR_HOURLY = [['DATA', 'T', 'TX', 'TN', 'HR', 'HRN', 'HRX', 'PPT',
'VVM10', 'DVM10', 'VVX10', 'DVVX10', 'Unnamed: 13'],
['DATA', 'T', 'Tx', 'Tn', 'HR', 'HRN', 'HRX'],
['DATA', 'T', 'Tx', 'Tn', 'HR', 'HRn', 'HRx', 'PPT',
'VVM10', 'DVM10', 'VVX10', 'DVX10', 'RS'],
['DATA', 'T', 'Tx', 'Tn', 'HR', 'HRn', 'HRx', 'PPT',
'VVM10', 'DVM10', 'VVX10', 'DVX10', 'RS']
]
UNOFFICIAL_ATTR = [['DATA', 'Alt', 'Temp_Max', 'Temp_Min', 'Hum_Max',
'Hum_Min', 'Pres_Max', 'Pres_Min', 'Wind_Max',
'Prec_Today',
'Prec_Year']]
def create_partial():
real_2016 = pd.read_csv("./climateChallengeData/real/real_2016.csv",
index_col=None)
real_2016 = real_2016.groupby('day',
as_index=False).agg({'T_MEAN': 'mean'})
real_2016.columns = ['date', 'mean']
real_2016.to_csv("./data_for_models/sub_partial.csv")
class DataFrameSelector(BaseEstimator, TransformerMixin):
"""
Class for selecting columns from a dataframe by using a list
with the column names
"""
def __init__(self, attribute_names):
self.attribute_names = attribute_names
def fit(self, X, y=None):
return self
def transform(self, X):
return X.loc[:, self.attribute_names]
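# Minimal usage sketch (added for illustration; not part of the original
# pipeline). DataFrameSelector follows the sklearn transformer contract, so it
# can be used on its own or as the first step of a Pipeline. The toy dataframe
# and column names below are assumptions made only for this example.
def _example_dataframe_selector():
    toy = pd.DataFrame({"Tm": [10.2, 11.5], "Tx": [15.0, 16.1], "noise": [1, 2]})
    selector = DataFrameSelector(["Tm", "Tx"])
    return selector.fit_transform(toy)  # keeps only the requested columns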
class official_station_adder(BaseEstimator, TransformerMixin):
"""
Class for adding attributes from official stations to the values predicted
from the model (real values). The data is the joint for the moment
"""
def __init__(self, attributes_to_add,
include_distance=True, distances=None):
self.attributes_to_add = attributes_to_add
self.include_distance = include_distance
if self.include_distance:
self.distances = distances
def fit(self, X, y=None):
return self
def transform(self, X, y):
# to_datetime functions is super slow if format is not supplied
self.full = X
jj = len(self.full.columns)
for i, j in tqdm(zip(self.attributes_to_add,
range(len(self.attributes_to_add))),
total=len(self.attributes_to_add)):
y[j]['DATA'] = pd.to_datetime(y[j]['DATA'],
format="%Y-%m-%d", exact=True)
y[j]['DATA'] = y[j]['DATA'].dt.strftime("%Y-%m-%d")
self.full = pd.merge(self.full, y[j][i], how='inner',
left_on='day', right_on='DATA',
suffixes=("_" + str(jj),
"_" + str(jj+1)))
jj += 1
if self.include_distance:
self.full = pd.merge(self.full, self.distances, how='inner',
left_on='idx', right_on='idx',
suffixes=("_0", "_1"))
return self.full
def read_real_files(direc="./climateChallengeData/real"):
"""
    Takes the files in the folder real and groups them into a single pandas
    dataframe. This dataframe will be the base for adding more features and
    making the training, validation and test division.
args
-----
    direc (str):
where the files are located
returns
------
full_file (dataframe):
        dataframe with all the years and each day with the corresponding
data
"""
files = []
for i in listdir(direc):
name = path.join(direc, i)
files.append( | pd.read_csv(name) | pandas.read_csv |
import pandas as pd
from uhxl import UhExcelFile
DATA = "tests/data/test.xlsx"
FILE = UhExcelFile(DATA, hide_rows=True, hide_columns=True)
def test_read_excel_sheet1():
df = pd.read_excel(FILE, sheet_name="sheet1")
assert isinstance(df, pd.DataFrame)
assert df.equals(pd.DataFrame({"A": ["a1", "a2"], "B": ["b1", "b2"]}))
def test_read_excel_sheet2():
df = | pd.read_excel(FILE, sheet_name="sheet2") | pandas.read_excel |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Receives UDP packets from iPadPix over WiFi.
Applies cluster classification and stores results as pandas data frame in python's .pkl format
Recording is stopped and data saved after hitting Ctrl-C.
Prints histogram overview plots when finished.
Requirements besides numpy, pandas and matplotlib modules:
avro-python3 module version 1.8.2.
@author: <NAME>
@date: February 2019
"""
import avro.schema
import avro.io
import socket, io
import struct
import datetime
import pandas as pd
import numpy as np
import time
import sys
import matplotlib.pyplot as plt
HOSTNAME = 'ozelmacpro.local' # specify correct hostname or IP to listen on the right network interface
UDP_PORT = 8123
# <codecell>
def resample(df, unit, period = 1):
if unit == 'm':
pdOffsetAlias = 'min' #equals 'T'!
else:
pdOffsetAlias = unit
dg = df.groupby([pd.Grouper(freq=str(period)+pdOffsetAlias, key='ts'),'ptype']).size().unstack()
dgtd = dg.index - dg.index[0]
dgtd = dgtd.astype('timedelta64[' + unit + ']').astype(int)
dg = dg.set_index(dgtd)
dg.index.name = '[' + pdOffsetAlias + ']'
dg = dg.fillna(0)
return dg
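# Illustrative sketch (added for clarity; not part of the original script).
# resample() expects a frame with a datetime column 'ts' and a categorical
# column 'ptype', and returns per-particle-type counts on a relative time axis.
# The toy events below are an assumption made only for this example.
def _example_resample():
    toy = pd.DataFrame({
        "ts": pd.to_datetime([
            "2019-02-01 12:00:01", "2019-02-01 12:00:30",
            "2019-02-01 12:01:10", "2019-02-01 12:02:05",
        ]),
        "ptype": pd.Categorical(["alpha", "beta", "alpha", "muon"]),
    })
    return resample(toy, unit="m", period=1)  # counts per 1-minute bin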
# <codecell>
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
if sys.platform == "linux":
# used more precise socket receive timestamps under unix
SO_TIMESTAMPNS = 35 #Linux?
sock.setsockopt(socket.SOL_SOCKET, SO_TIMESTAMPNS, 1)
sock.bind((HOSTNAME, UDP_PORT))
schema = avro.schema.Parse(open("ipadpix_schema.json").read())
ptypes = | pd.Series(["alpha", "beta", "betagamma", "x-ray", "muon" ,"unknown"], dtype="category") | pandas.Series |
"""Remotely control your Binance account via their API : https://binance-docs.github.io/apidocs/spot/en"""
import re
import json
import hmac
import hashlib
import time
import requests
import base64
import sys
import math
import pandas as pd
import numpy as np
from numpy import floor
from datetime import datetime, timedelta
from requests.auth import AuthBase
from requests import Request, Session
from models.helper.LogHelper import Logger
from urllib.parse import urlencode
DEFAULT_MAKER_FEE_RATE = 0.0015 # added 0.0005 to allow for price movements
DEFAULT_TAKER_FEE_RATE = 0.0015 # added 0.0005 to allow for price movements
DEFAULT_TRADE_FEE_RATE = 0.0015 # added 0.0005 to allow for price movements
DEFAULT_GRANULARITY = "1h"
SUPPORTED_GRANULARITY = ["1m", "5m", "15m", "1h", "6h", "1d"]
MULTIPLIER_EQUIVALENTS = [1, 5, 15, 60, 360, 1440]
FREQUENCY_EQUIVALENTS = ["T", "5T", "15T", "H", "6H", "D"]
DEFAULT_MARKET = "BTCGBP"
class AuthAPIBase:
def _isMarketValid(self, market: str) -> bool:
p = re.compile(r"^[A-Z0-9]{5,12}$")
if p.match(market):
return True
return False
def convert_time(self, epoch: int = 0):
        if not math.isnan(epoch):
epoch_str = str(epoch)[0:10]
return datetime.fromtimestamp(int(epoch_str))
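# Small illustrative check (added for clarity; not part of the original file).
# _isMarketValid() only accepts upper-case alphanumeric symbols of 5-12
# characters, and the granularity tables above map API granularities to pandas
# resample frequencies position by position.
def _example_market_and_granularity():
    base = AuthAPIBase()
    assert base._isMarketValid("BTCGBP")
    assert not base._isMarketValid("btc-gbp")
    # e.g. "1h" -> "H" for pandas resampling
    return dict(zip(SUPPORTED_GRANULARITY, FREQUENCY_EQUIVALENTS))["1h"]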
class AuthAPI(AuthAPIBase):
def __init__(
self,
api_key: str = "",
api_secret: str = "",
api_url: str = "https://api.binance.com",
order_history: list = [],
recv_window: int = 5000,
) -> None:
"""Binance API object model
Parameters
----------
api_key : str
Your Binance account portfolio API key
api_secret : str
Your Binance account portfolio API secret
api_url
Binance API URL
"""
# options
self.debug = False
self.die_on_api_error = False
valid_urls = [
"https://api.binance.com",
"https://api.binance.us",
"https://testnet.binance.vision",
]
# validate Binance API
if api_url not in valid_urls:
raise ValueError("Binance API URL is invalid")
# validates the api key is syntactically correct
p = re.compile(r"^[A-z0-9]{64,64}$")
if not p.match(api_key):
self.handle_init_error("Binance API key is invalid")
# validates the api secret is syntactically correct
p = re.compile(r"^[A-z0-9]{64,64}$")
if not p.match(api_secret):
self.handle_init_error("Binance API secret is invalid")
self._api_key = api_key
self._api_secret = api_secret
self._api_url = api_url
# order history
self.order_history = order_history
# api recvWindow
self.recv_window = recv_window
def handle_init_error(self, err: str) -> None:
if self.debug:
raise TypeError(err)
else:
raise SystemExit(err)
def _dispatch_request(self, method: str):
session = Session()
session.headers.update(
{
"Content-Type": "application/json; charset=utf-8",
"X-MBX-APIKEY": self._api_key,
}
)
return {
"GET": session.get,
"DELETE": session.delete,
"PUT": session.put,
"POST": session.post,
}.get(method, "GET")
def createHash(self, uri: str = ""):
return hmac.new(
self._api_secret.encode("utf-8"), uri.encode("utf-8"), hashlib.sha256
).hexdigest()
def getTimestamp(self):
return int(time.time() * 1000)
def getAccounts(self) -> pd.DataFrame:
"""Retrieves your list of accounts"""
# GET /api/v3/account
try:
resp = self.authAPI(
"GET", "/api/v3/account", {"recvWindow": self.recv_window}
)
# unexpected data, then return
if len(resp) == 0 or "balances" not in resp:
return pd.DataFrame()
if isinstance(resp["balances"], list):
df = pd.DataFrame.from_dict(resp["balances"])
else:
df = pd.DataFrame(resp["balances"], index=[0])
# unexpected data, then return
if len(df) == 0:
return pd.DataFrame()
# exclude accounts that are locked
df = df[df.locked != 0.0]
df["locked"] = df["locked"].astype(bool)
# reset the dataframe index to start from 0
df = df.reset_index()
df["id"] = df["index"]
df["hold"] = 0.0
df["profile_id"] = None
df["available"] = df["free"]
df["id"] = df["id"].astype(object)
df["hold"] = df["hold"].astype(object)
# exclude accounts with a nil balance
df = df[df.available != "0.00000000"]
df = df[df.available != "0.00"]
# inconsistent columns, then return
if len(df.columns) != 8:
return pd.DataFrame()
# rename columns
df.columns = [
"index",
"currency",
"balance",
"trading_enabled",
"id",
"hold",
"profile_id",
"available",
]
# return if currency is missing
if "currency" not in df:
                return pd.DataFrame()
return df[
[
"index",
"id",
"currency",
"balance",
"hold",
"available",
"profile_id",
"trading_enabled",
]
]
except:
return pd.DataFrame()
def getAccount(self) -> pd.DataFrame:
"""Retrieves all accounts for Binance as there is no specific account id"""
return self.getAccounts()
def getFees(self, market: str = "") -> pd.DataFrame:
"""Retrieves a account fees"""
volume = 0
try:
# GET /api/v3/klines
resp = self.authAPI(
"GET",
"/api/v3/klines",
{"symbol": "BTCUSDT", "interval": "1d", "limit": 30},
)
# if response is empty, then return
if len(resp) == 0:
return pd.DataFrame()
# convert the API response into a Pandas DataFrame
df = pd.DataFrame(
resp,
columns=[
"open_time",
"open",
"high",
"low",
"close",
"volume",
"close_time",
"quote_asset_volume",
"number_of_trades",
"taker_buy_base_asset_volume",
"traker_buy_quote_asset_volume",
"ignore",
],
)
df["volume"] = df["volume"].astype(float)
volume = np.round(float(df[["volume"]].mean()))
except:
pass
# GET /api/v3/account
resp = self.authAPI("GET", "/api/v3/account", {"recvWindow": self.recv_window})
# unexpected data, then return
if len(resp) == 0:
return pd.DataFrame()
if "makerCommission" in resp and "takerCommission" in resp:
maker_fee_rate = resp["makerCommission"] / 10000
taker_fee_rate = resp["takerCommission"] / 10000
else:
maker_fee_rate = 0.001
taker_fee_rate = 0.001
return pd.DataFrame(
[
{
"maker_fee_rate": maker_fee_rate,
"taker_fee_rate": taker_fee_rate,
"usd_volume": volume,
"market": "",
}
]
)
def getMakerFee(self, market: str = "") -> float:
"""Retrieves the maker fee"""
if len(market):
fees = self.getFees(market)
else:
fees = self.getFees()
if len(fees) == 0 or "maker_fee_rate" not in fees:
Logger.error(
f"error: 'maker_fee_rate' not in fees (using {DEFAULT_MAKER_FEE_RATE} as a fallback)"
)
return DEFAULT_MAKER_FEE_RATE
return float(fees["maker_fee_rate"].to_string(index=False).strip())
def getTakerFee(self, market: str = "") -> float:
"""Retrieves the taker fee"""
        if len(market):
fees = self.getFees(market)
else:
fees = self.getFees()
if len(fees) == 0 or "taker_fee_rate" not in fees:
Logger.error(
f"error: 'taker_fee_rate' not in fees (using {DEFAULT_TAKER_FEE_RATE} as a fallback)"
)
return DEFAULT_TAKER_FEE_RATE
return float(fees["taker_fee_rate"].to_string(index=False).strip())
def getUSDVolume(self) -> float:
"""Retrieves the USD volume"""
fees = self.getFees()
return float(fees["usd_volume"].to_string(index=False).strip())
def getMarkets(self) -> list:
"""Retrieves a list of markets on the exchange"""
try:
# GET /api/v3/exchangeInfo
resp = self.authAPI("GET", "/api/v3/exchangeInfo")
# unexpected data, then return
if len(resp) == 0:
return pd.DataFrame()
if "symbols" in resp:
if isinstance(resp["symbols"], list):
df = pd.DataFrame.from_dict(resp["symbols"])
else:
df = pd.DataFrame(resp["symbols"], index=[0])
else:
df = pd.DataFrame()
return df[df["isSpotTradingAllowed"] == True][["symbol"]].squeeze().tolist()
except:
return pd.DataFrame()
def getOrders(
self,
market: str = "",
action: str = "",
status: str = "done",
order_history: list = [],
) -> pd.DataFrame:
"""Retrieves your list of orders with optional filtering"""
# if market provided
markets = None
if market != "":
# validates the market is syntactically correct
if not self._isMarketValid(market):
raise ValueError("Binance market is invalid.")
else:
if len(order_history) > 0 or status != "all":
full_scan = False
self.order_history = order_history
if len(self.order_history) > 0:
if self._isMarketValid(market) and market not in self.order_history:
self.order_history.append(market)
markets = self.order_history
else:
full_scan = True
markets = self.getMarkets()
# if action provided
if action != "":
# validates action is either a buy or sell
if not action in ["buy", "sell"]:
raise ValueError("Invalid order action.")
# validates status is either open, canceled, pending, done, active, or all
if not status in ["open", "canceled", "pending", "done", "active", "all"]:
raise ValueError("Invalid order status.")
try:
if markets is not None:
df = pd.DataFrame()
for market in markets:
if full_scan is True:
print(f"scanning {market} order history.")
# GET /api/v3/allOrders
resp = self.authAPI(
"GET",
"/api/v3/allOrders",
{"symbol": market, "recvWindow": self.recv_window},
)
# unexpected data, then return
if len(resp) == 0:
return pd.DataFrame()
if full_scan is True:
time.sleep(0.25)
if isinstance(resp, list):
df_tmp = pd.DataFrame.from_dict(resp)
else:
df_tmp = pd.DataFrame(resp, index=[0])
# unexpected data, then return
if len(df_tmp) == 0:
return pd.DataFrame()
if full_scan is True and len(df_tmp) > 0:
self.order_history.append(market)
if len(df_tmp) > 0:
df = pd.concat([df, df_tmp])
if full_scan is True:
print(
"add to order history to prevent full scan:", self.order_history
)
else:
# GET /api/v3/allOrders
resp = self.authAPI(
"GET",
"/api/v3/allOrders",
{"symbol": market, "recvWindow": self.recv_window},
)
# unexpected data, then return
if len(resp) == 0:
return pd.DataFrame()
if isinstance(resp, list):
df = pd.DataFrame.from_dict(resp)
else:
df = pd.DataFrame(resp, index=[0])
if len(df) == 0 or "time" not in df:
return pd.DataFrame()
# feature engineering
df.time = df["time"].map(self.convert_time)
df["time"] = pd.to_datetime(df["time"]).dt.tz_localize("UTC")
df["size"] = np.where(
df["side"] == "BUY",
df["cummulativeQuoteQty"],
np.where(df["side"] == "SELL", df["executedQty"], 222),
)
df["fees"] = df["size"].astype(float) * 0.001
df["fees"] = df["fees"].astype(object)
df["side"] = df["side"].str.lower()
df.rename(
columns={
"time": "created_at",
"symbol": "market",
"side": "action",
"executedQty": "filled",
},
errors="raise",
inplace=True,
)
def convert_status(status: str = ""):
if status == "FILLED":
return "done"
elif status == "NEW":
return "open"
elif status == "PARTIALLY_FILLED":
return "pending"
else:
return status
df.status = df.status.map(convert_status)
df["status"] = df["status"].str.lower()
def calculate_price(row):
if row.type == "LIMIT" and float(row.price) > 0:
return row.price
elif row.action == "buy":
return float(row.cummulativeQuoteQty) / float(row.filled)
elif row.action == "sell":
return float(row.cummulativeQuoteQty) / float(row.filled)
else:
return row.price
df["price"] = df.copy().apply(calculate_price, axis=1)
# select columns
df = df[
[
"created_at",
"market",
"action",
"type",
"size",
"filled",
"fees",
"price",
"status",
]
]
# filtering
if action != "":
df = df[df["action"] == action]
if status != "all":
df = df[df["status"] == status]
return df
except:
return pd.DataFrame()
def getTime(self) -> datetime:
"""Retrieves the exchange time"""
try:
# GET /api/v3/time
resp = self.authAPI("GET", "/api/v3/time")
return self.convert_time(int(resp["serverTime"]))
except:
return None
def getMarketInfoFilters(self, market: str) -> pd.DataFrame:
"""Retrieves markets exchange info"""
df = pd.DataFrame()
try:
# GET /api/v3/exchangeInfo
resp = self.authAPI("GET", "/api/v3/exchangeInfo", {"symbol": market})
# unexpected data, then return
if len(resp) == 0:
return pd.DataFrame()
if "symbols" in resp:
if isinstance(resp["symbols"], list):
if "filters" in resp["symbols"][0]:
df = | pd.DataFrame.from_dict(resp["symbols"][0]["filters"]) | pandas.DataFrame.from_dict |
# Data Preprocessing
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns; sns.set(style="ticks", color_codes=True)
import scipy.stats as stats
import statsmodels.api as sm
# Importing the dataset
dataset = | pd.read_csv('50_Startups.csv') | pandas.read_csv |
import unittest
import tempfile
import json
import numpy as np
import pandas as pd
from supervised.preprocessing.label_encoder import LabelEncoder
class LabelEncoderTest(unittest.TestCase):
def test_fit(self):
# training data
d = {"col1": ["a", "a", "c"], "col2": ["w", "e", "d"]}
df = pd.DataFrame(data=d)
le = LabelEncoder()
# check first column
le.fit(df["col1"])
data_json = le.to_json()
# values from column should be in data json
self.assertTrue("a" in data_json)
self.assertTrue("c" in data_json)
self.assertTrue("b" not in data_json)
# there is alphabetical order for values
self.assertEqual(0, data_json["a"])
self.assertEqual(1, data_json["c"])
# check next column
le.fit(df["col2"])
data_json = le.to_json()
self.assertEqual(0, data_json["d"])
self.assertEqual(1, data_json["e"])
self.assertEqual(2, data_json["w"])
def test_transform(self):
# training data
d = {"col1": ["a", "a", "c"]}
df = pd.DataFrame(data=d)
# fit encoder
le = LabelEncoder()
le.fit(df["col1"])
# test data
d_test = {"col2": ["c", "c", "a"]}
df_test = pd.DataFrame(data=d_test)
# transform
y = le.transform(df_test["col2"])
self.assertEqual(y[0], 1)
self.assertEqual(y[1], 1)
self.assertEqual(y[2], 0)
def test_transform_with_new_values(self):
# training data
d = {"col1": ["a", "a", "c"]}
df = pd.DataFrame(data=d)
# fit encoder
le = LabelEncoder()
le.fit(df["col1"])
# test data
d_test = {"col2": ["c", "a", "d", "f"]}
df_test = | pd.DataFrame(data=d_test) | pandas.DataFrame |
"""
Msgpack serializer support for reading and writing pandas data structures
to disk
portions of msgpack_numpy package, by <NAME> were incorporated
into this module (and tests_packers.py)
License
=======
Copyright (c) 2013, <NAME>.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of <NAME> nor the names of any
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from datetime import date, datetime, timedelta
from io import BytesIO
import os
from textwrap import dedent
import warnings
from dateutil.parser import parse
import numpy as np
from pandas.errors import PerformanceWarning
from pandas.util._move import (
BadMove as _BadMove, move_into_mutable_buffer as _move_into_mutable_buffer)
from pandas.core.dtypes.common import (
is_categorical_dtype, is_datetime64tz_dtype, is_object_dtype,
needs_i8_conversion, pandas_dtype)
from pandas import ( # noqa:F401
Categorical, CategoricalIndex, DataFrame, DatetimeIndex, Float64Index,
Index, Int64Index, Interval, IntervalIndex, MultiIndex, NaT, Panel, Period,
PeriodIndex, RangeIndex, Series, TimedeltaIndex, Timestamp)
from pandas.core import internals
from pandas.core.arrays import DatetimeArray, IntervalArray, PeriodArray
from pandas.core.arrays.sparse import BlockIndex, IntIndex
from pandas.core.generic import NDFrame
from pandas.core.internals import BlockManager, _safe_reshape, make_block
from pandas.core.sparse.api import SparseDataFrame, SparseSeries
from pandas.io.common import _stringify_path, get_filepath_or_buffer
from pandas.io.msgpack import ExtType, Packer as _Packer, Unpacker as _Unpacker
# check which compression libs we have installed
try:
import zlib
def _check_zlib():
pass
except ImportError:
def _check_zlib():
raise ImportError('zlib is not installed')
_check_zlib.__doc__ = dedent(
"""\
Check if zlib is installed.
Raises
------
ImportError
Raised when zlib is not installed.
""",
)
try:
import blosc
def _check_blosc():
pass
except ImportError:
def _check_blosc():
raise ImportError('blosc is not installed')
_check_blosc.__doc__ = dedent(
"""\
Check if blosc is installed.
Raises
------
ImportError
Raised when blosc is not installed.
""",
)
# until we can pass this into our conversion functions,
# this is pretty hacky
compressor = None
def to_msgpack(path_or_buf, *args, **kwargs):
"""
msgpack (serialize) object to input file path
THIS IS AN EXPERIMENTAL LIBRARY and the storage format
may not be stable until a future release.
Parameters
----------
path_or_buf : string File path, buffer-like, or None
if None, return generated string
args : an object or objects to serialize
encoding : encoding for unicode objects
append : boolean whether to append to an existing msgpack
(default is False)
compress : type of compressor (zlib or blosc), default to None (no
compression)
"""
global compressor
compressor = kwargs.pop('compress', None)
append = kwargs.pop('append', None)
if append:
mode = 'a+b'
else:
mode = 'wb'
def writer(fh):
for a in args:
fh.write(pack(a, **kwargs))
path_or_buf = _stringify_path(path_or_buf)
if isinstance(path_or_buf, str):
with open(path_or_buf, mode) as fh:
writer(fh)
elif path_or_buf is None:
buf = BytesIO()
writer(buf)
return buf.getvalue()
else:
writer(path_or_buf)
def read_msgpack(path_or_buf, encoding='utf-8', iterator=False, **kwargs):
"""
Load msgpack pandas object from the specified
file path
THIS IS AN EXPERIMENTAL LIBRARY and the storage format
may not be stable until a future release.
Parameters
----------
path_or_buf : string File path, BytesIO like or string
encoding : Encoding for decoding msgpack str type
iterator : boolean, if True, return an iterator to the unpacker
(default is False)
Returns
-------
obj : same type as object stored in file
"""
path_or_buf, _, _, should_close = get_filepath_or_buffer(path_or_buf)
if iterator:
return Iterator(path_or_buf)
def read(fh):
unpacked_obj = list(unpack(fh, encoding=encoding, **kwargs))
if len(unpacked_obj) == 1:
return unpacked_obj[0]
if should_close:
try:
path_or_buf.close()
except IOError:
pass
return unpacked_obj
# see if we have an actual file
if isinstance(path_or_buf, str):
try:
exists = os.path.exists(path_or_buf)
except (TypeError, ValueError):
exists = False
if exists:
with open(path_or_buf, 'rb') as fh:
return read(fh)
if isinstance(path_or_buf, bytes):
# treat as a binary-like
fh = None
try:
fh = BytesIO(path_or_buf)
return read(fh)
finally:
if fh is not None:
fh.close()
elif hasattr(path_or_buf, 'read') and callable(path_or_buf.read):
# treat as a buffer like
return read(path_or_buf)
raise ValueError('path_or_buf needs to be a string file path or file-like')
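# Illustrative round-trip sketch (added for clarity; not part of the pandas
# source). It relies on the pack()/unpack() helpers defined further down in
# the original module, so it is only meaningful in the context of the full
# file.
def _example_msgpack_roundtrip():
    df = DataFrame({"a": [1, 2, 3], "b": ["x", "y", "z"]})
    buf = to_msgpack(None, df)    # path_or_buf=None returns the raw bytes
    restored = read_msgpack(buf)  # accepts bytes, a file path or a buffer
    return restored.equals(df)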
dtype_dict = {21: np.dtype('M8[ns]'),
'datetime64[ns]': np.dtype('M8[ns]'),
'datetime64[us]': np.dtype('M8[us]'),
22: np.dtype('m8[ns]'),
'timedelta64[ns]': np.dtype('m8[ns]'),
'timedelta64[us]': np.dtype('m8[us]'),
# this is platform int, which we need to remap to np.int64
# for compat on windows platforms
7: np.dtype('int64'),
'category': 'category'
}
def dtype_for(t):
""" return my dtype mapping, whether number or name """
if t in dtype_dict:
return dtype_dict[t]
return np.typeDict.get(t, t)
c2f_dict = {'complex': np.float64,
'complex128': np.float64,
'complex64': np.float32}
# windows (32 bit) compat
if hasattr(np, 'float128'):
c2f_dict['complex256'] = np.float128
def c2f(r, i, ctype_name):
"""
Convert strings to complex number instance with specified numpy type.
"""
ftype = c2f_dict[ctype_name]
return np.typeDict[ctype_name](ftype(r) + 1j * ftype(i))
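# Quick illustrative check (added for clarity; not part of the original
# module): c2f() rebuilds a complex value of the requested numpy complex type
# from its real and imaginary string parts.
def _example_c2f():
    value = c2f("1.5", "-2.0", "complex128")
    assert value == np.complex128(1.5 - 2.0j)
    return value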
def convert(values):
""" convert the numpy values to a list """
dtype = values.dtype
if is_categorical_dtype(values):
return values
elif | is_object_dtype(dtype) | pandas.core.dtypes.common.is_object_dtype |
"""
This program implements a French Flash Card Game
"""
import tkinter as tk
import pandas as pd
from random import choice
BACKGROUND_COLOR = "#B1DDC6"
# Tracks the current word from the file
current_card = {}
# Stores the entire list of words from file
word_list = {}
try:
# Read the words to learn if this app has been used before
df = | pd.read_csv('./data/words_to_learn.csv') | pandas.read_csv |
#!/usr/bin/env python
"""
Usage: Ingredient_scraper.py
This script extract information on the ingredients and quantities
from the Allerhande XML data and turns this into a csv.
"""
# author: <EMAIL>
from bs4 import BeautifulSoup
import pandas as pd
import glob
file_list = glob.glob("./www.ah.nl/allerhande/recept/**/*/*", recursive=True)
def allerhande_parser(data):
"""Function for extracting ingredients and units from Allerhande xml"""
    dfcols = ['id_', 'data-search-term', 'description-singular',
'description-plural', 'quantity', 'quantity-unit-singular',
'quantity-unit-plural', 'additional_info', 'default-label']
df = pd.DataFrame(columns=dfcols)
for i in data:
xml = open(i)
soup = BeautifulSoup(xml, 'lxml-xml')
if soup.find('div', class_='container detail') is not None:
id_ = soup.find('div', class_='container detail').attrs['data-dax-id']
else:
id_ = ""
for data in soup.find_all('ul', class_='list shopping ingredient-selector-list'):
for a in data.find_all('a'):
term = a.attrs['data-search-term']
value1 = a.attrs['data-description-singular']
value2 = a.attrs['data-description-plural']
value3 = a.attrs['data-quantity']
value4 = a.attrs['data-quantity-unit-singular']
value5 = a.attrs['data-quantity-unit-plural']
value6 = a.attrs['data-additional-info']
value7 = a.attrs['data-default-label']
df = df.append( | pd.Series([id_, term, value1, value2, value3, value4, value5, value6, value7], index=dfcols) | pandas.Series |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 18 15:05:09 2019
@author: riddaali
"""
import pandas as pd
import seaborn as sns # For plots/graphs
import matplotlib.pyplot as plt
from sklearn import preprocessing
from sklearn import model_selection
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
riddas_palette = ['#fe46a5', '#7f5f00','#c1f80a', '#3b638c', '#82cafc',
'#90b134', '#08ff08', '#9d7651', '#C03028', '#06470c',
'#7038F8', '#E0C068', '#EE99AC']
# =============================================================================
# Placing the legend at the top right corner within the plot by setting the
# "loc" (location) = 0 and the "borderaxespad" (pad among the axes and the
# border) = 0.
# =============================================================================
def plot_obesity_age(new_df):
"""
Plot 1: Obesity vs Age.
:param [new_df]: [cleaned dataframe that will be used to generate the
"Obesity vs Age" plot]
"""
fig1 = plt.figure()
sns.countplot(y='Overweight_Obese', hue='Age_Categories', data=new_df,
palette=riddas_palette)
# =============================================================================
# Saving the plot in the specified path by using the function "savefig"
# from the "matplotlib.pyplot" library
# =============================================================================
fig1.savefig('./Plots/Obesity_vs_Age')
plt.show()
def plot_obesity_education(new_df):
"""
Plot 2: Obesity vs Education Level.
:param [new_df]: [cleaned dataframe that will be used to generate the
"Obesity vs Education Level" plot]
"""
fig2 = plt.figure()
sns.countplot(y='Overweight_Obese', hue='Education_Level', data=new_df,
palette="Set2")
plt.legend(title="Level of Education",loc=0, borderaxespad=0.)
fig2.savefig('./Plots/Obesity_vs_Education')
plt.show()
def plot_obesity_income(new_df):
"""
Plot 3: Obesity vs Income Categories.
:param [new_df]: [cleaned dataframe that will be used to generate the
"Obesity vs Income Categories" plot]
"""
fig3 = plt.figure()
sns.countplot(y='Overweight_Obese', hue='Income_Categories', data=new_df,
palette="Set2")
plt.legend(title="Income Categories",loc=0, borderaxespad=0.)
fig3.savefig('./Plots/Obesity_vs_Income')
plt.show()
def plot_obesity_exercise(new_df):
"""
Plot 4: Obesity vs Exercise.
:param [new_df]: [cleaned dataframe that will be used to generate the
"Obesity vs Exercise" plot]
"""
fig4 = plt.figure()
sns.countplot(y='Overweight_Obese', hue='Exercise', data=new_df,
palette="Set2")
plt.legend(title="Exercise",loc=0, borderaxespad=0.)
fig4.savefig('./Plots/Obesity_vs_Exercise')
plt.show()
def plot_obesity_gender(new_df):
"""
Plot 5: Obesity vs Gender.
:param [new_df]: [cleaned dataframe that will be used to generate the
"Obesity vs Gender" plot]
"""
fig5 = plt.figure()
sns.countplot(y='Overweight_Obese', hue='Gender', data=new_df,
palette="Set2")
plt.legend(title="Gender",loc=0, borderaxespad=0.)
fig5.savefig('./Plots/Obesity_vs_Gender')
plt.show()
def plot_obesity_alcohol(new_df):
"""
Plot 6: Obesity vs Alcohol Consumption.
:param [new_df]: [cleaned dataframe that will be used to generate the
"Obesity vs Alcohol Consumption" plot]
"""
fig6 = plt.figure()
sns.countplot(y='Overweight_Obese', hue='Alcohol_Consumption', data=new_df,
palette="Set2")
plt.legend(title="Alcohol Consumption",loc=0, borderaxespad=0.)
fig6.savefig('./Plots/Obesity_vs_Alcohol')
plt.show()
def plot_obesity_arthritis(new_df):
"""
Plot 7: Obesity vs Arthritis.
:param [new_df]: [cleaned dataframe that will be used to generate the
"Obesity vs Arthritis" plot]
"""
fig7 = plt.figure()
sns.countplot(y='Overweight_Obese', hue='Arthritis', data=new_df,
palette="Set2")
plt.legend(title="Arthritis", loc=0, borderaxespad=0.)
fig7.savefig('./Plots/Obesity_vs_Arthritis')
plt.show()
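# Illustrative driver (added for clarity; not part of the original script).
# The plotting helpers above all take the cleaned BRFSS dataframe, so a run of
# the exploratory plots could look like the sketch below. The CSV path and the
# './Plots' output directory are taken from the code in this file and are not
# verified here.
def _example_plot_all(csv_path='BRFSS_cleaned.csv'):
    new_df = pd.read_csv(csv_path)
    for plot_func in (plot_obesity_age, plot_obesity_education,
                      plot_obesity_income, plot_obesity_exercise,
                      plot_obesity_gender, plot_obesity_alcohol,
                      plot_obesity_arthritis):
        plot_func(new_df)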
def featureImportance():
"""
Creating the "Feature Importance" plot for the Random Forest model by using
the built-in class "feature_importances_".
Source used for guidance:
- https://stackoverflow.com/questions/44101458/random-forest-feature-importance-chart-using-python
"""
new_df = | pd.read_csv('BRFSS_cleaned.csv') | pandas.read_csv |
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
# Imputation of missing values for numerical variables
# by the mode (the value that appears most often in a set of data values).
class ImputerNumericalVariable(BaseEstimator, TransformerMixin):
def __init__(self, variables=None):
self.variables = variables
def fit(self, X, y=None):
self.variable_mode_dict = {}
for variable in self.variables:
self.variable_mode_dict[variable] = X[variable].mode()[0]
return self
def transform(self, X):
X = X.copy()
for variable in self.variables:
X[variable].fillna(self.variable_mode_dict[variable], inplace=True)
return X
# Imputation of missing values for categorical variables.
# Replace missing values with new label: "missing_value".
class ImputerCategoricalVariable(BaseEstimator, TransformerMixin):
def __init__(self, variables=None):
self.variables = variables
def fit(self, X, y=None):
return self
def transform(self, X):
X = X.copy()
for variable in self.variables:
X[variable] = X[variable].fillna("missing_value")
return X
# Logarithm transformation of non-normal distributed variables.
class TransformerLogarithm(BaseEstimator, TransformerMixin):
def __init__(self, variables=None):
self.variables = variables
def fit(self, X, y=None):
return self
def transform(self, X):
X = X.copy()
for variable in self.variables:
X[variable] = np.log(X[variable])
return X
# Get the time elapsed between variable and the year in which the house was sold
class ProcessorTemporalVariable(BaseEstimator, TransformerMixin):
def __init__(self, variables=None, related_variable=None):
self.variables = [variables]
self.related_variables = related_variable
def fit(self, X, y=None):
return self
def transform(self, X):
X = X.copy()
for variable in self.variables:
X[variable] = X[self.related_variables] - X[variable]
return X
# Replace rare labels (which appear only in a small proportion of the observations) by the string "rare_label".
class EncoderRareLabel(BaseEstimator, TransformerMixin):
def __init__(self, tolerance, variables=None):
self.variables = variables
self.tolerance = tolerance
def fit(self, X, y=None):
self.rare_label_dict = {}
for variable in self.variables:
            frequent_var = pd.Series(X[variable].value_counts() / float(len(X)))  # plain float; np.float was removed from recent NumPy
self.rare_label_dict[variable] = list(frequent_var[frequent_var >= self.tolerance].index)
return self
def transform(self, X):
X = X.copy()
for variable in self.variables:
X[variable] = np.where(X[variable].isin(self.rare_label_dict[variable]), X[variable], "rare_label")
return X
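# A small illustration (not part of the original module; the toy data are made up):
# with tolerance=0.2, labels present in fewer than 20% of the rows are collapsed
# into "rare_label" by the encoder above.
def _example_rare_label_encoding():
    X = pd.DataFrame({'colour': ['red'] * 8 + ['green', 'blue']})
    encoder = EncoderRareLabel(tolerance=0.2, variables=['colour'])
    # 'green' and 'blue' each appear in only 10% of rows, so both become 'rare_label'
    return encoder.fit(X).transform(X)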
# Drop unnecessary variables.
class DropSelectedVariable(BaseEstimator, TransformerMixin):
def __init__(self, variables=None):
self.variables = variables
def fit(self, X, y=None):
return self
def transform(self, X):
X = X.copy()
X = X.drop(self.variables, axis=1)
return X
# Transform the strings of the categorical variables into numbers.
class EncoderCategoricalVariable(BaseEstimator, TransformerMixin):
def __init__(self, variables=None):
self.variables = variables
def fit(self, X, y):
temp = | pd.concat([X, y], axis=1) | pandas.concat |
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from google.cloud import storage
from pandas.io import gbq
import pandas as pd
import pickle
import re
import os
class PatentLandscapeExpander:
"""Class for L1&L2 expansion as 'Automated Patent Landscaping' describes.
This object takes a seed set and a Google Cloud BigQuery project name and
exposes methods for doing expansion of the project. The logical entry-point
to the class is load_from_disk_or_do_expansion, which checks for cached
expansions for the given self.seed_name, and if a previous run is available
it will load it from disk and return it; otherwise, it does L1 and L2
expansions, persists it in a cached 'data/[self.seed_name]/' directory,
and returns the data to the caller.
"""
seed_file = None
# BigQuery must be enabled for this project
bq_project = 'patent-landscape-165715'
patent_dataset = 'patents-public-data:patents.publications_latest'
#tmp_table = 'patents._tmp'
l1_tmp_table = 'patents._l1_tmp'
l2_tmp_table = 'patents._l2_tmp'
antiseed_tmp_table = 'patents.antiseed_tmp'
country_codes = set(['US'])
num_anti_seed_patents = 15000
us_only = True
# ratios and multipler for finding uniquely common CPC codes from seed set
min_ratio_of_code_to_seed = 0.04
min_seed_multiplier = 50.0
# persisted expansion information
training_data_full_df = None
seed_patents_df = None
l1_patents_df = None
l2_patents_df = None
anti_seed_patents = None
seed_data_path = None
def __init__(self, seed_file, seed_name, bq_project=None, patent_dataset=None, num_antiseed=None, us_only=None, prepare_training=True):
self.seed_file = seed_file
self.seed_data_path = os.path.join('data', seed_name)
if bq_project is not None:
self.bq_project = bq_project
if patent_dataset is not None:
self.patent_dataset = patent_dataset
#if tmp_table is not None:
# self.tmp_table = tmp_table
if num_antiseed is not None:
self.num_anti_seed_patents = num_antiseed
if us_only is not None:
self.us_only = us_only
self.prepare_training = prepare_training
def load_seeds_from_bq(self, seed_df):
where_clause = ",".join("'" + seed_df.PubNum + "'")
if self.us_only:
seed_patents_query = '''
SELECT
b.publication_number,
'Seed' as ExpansionLevel,
STRING_AGG(citations.publication_number) AS refs,
STRING_AGG(cpcs.code) AS cpc_codes
FROM
`patents-public-data.patents.publications` AS b,
UNNEST(citation) AS citations,
UNNEST(cpc) AS cpcs
WHERE
REGEXP_EXTRACT(b.publication_number, r'\w+-(\w+)-\w+') IN
(
{}
)
AND b.country_code = 'US'
AND citations.publication_number != ''
AND cpcs.code != ''
GROUP BY b.publication_number
;
'''.format(where_clause)
else:
seed_patents_query = '''
SELECT
b.publication_number,
'Seed' as ExpansionLevel,
STRING_AGG(citations.publication_number) AS refs,
STRING_AGG(cpcs.code) AS cpc_codes
FROM
`patents-public-data.patents.publications` AS b,
UNNEST(citation) AS citations,
UNNEST(cpc) AS cpcs
WHERE
b.publication_number IN
(
{}
)
AND citations.publication_number != ''
AND cpcs.code != ''
GROUP BY b.publication_number
;
'''.format(where_clause)
seed_patents_df = gbq.read_gbq(
query=seed_patents_query,
project_id=self.bq_project,
verbose=False,
dialect='standard')
return seed_patents_df
def load_seed_pubs(self, seed_file=None):
if seed_file is None:
seed_file = self.seed_file
#if self.us_only:
seed_df = pd.read_csv(seed_file, header=None, names=['PubNum'], dtype={'PubNum': 'str'})
#else:
# seed_df = pd.read_csv(seed_file, header=None, names=['publication_number'], dtype={'publication_number': 'str'})
return seed_df
def bq_get_num_total_patents(self):
if self.us_only:
num_patents_query = """
SELECT
COUNT(publication_number) AS num_patents
FROM
`patents-public-data.patents.publications` AS b
WHERE
country_code = 'US'
"""
else:
num_patents_query = """
SELECT
COUNT(publication_number) AS num_patents
FROM
`patents-public-data.patents.publications` AS b
"""
num_patents_df = gbq.read_gbq(
query=num_patents_query,
project_id=self.bq_project,
verbose=False,
dialect='standard')
return num_patents_df
def get_cpc_counts(self, seed_publications=None):
where_clause = '1=1'
if seed_publications is not None:
if self.us_only:
where_clause = """
REGEXP_EXTRACT(b.publication_number, r'\w+-(\w+)-\w+') IN
(
{}
)
""".format(",".join("'" + seed_publications + "'"))
else:
where_clause = """
b.publication_number IN
(
{}
)
""".format(",".join("'" + seed_publications + "'"))
if self.us_only:
cpc_counts_query = """
SELECT
cpcs.code,
COUNT(cpcs.code) AS cpc_count
FROM
`patents-public-data.patents.publications` AS b,
UNNEST(cpc) AS cpcs
WHERE
{}
AND cpcs.code != ''
AND country_code = 'US'
GROUP BY cpcs.code
ORDER BY cpc_count DESC;
""".format(where_clause)
else:
cpc_counts_query = """
SELECT
cpcs.code,
COUNT(cpcs.code) AS cpc_count
FROM
`patents-public-data.patents.publications` AS b,
UNNEST(cpc) AS cpcs
WHERE
{}
AND cpcs.code != ''
GROUP BY cpcs.code
ORDER BY cpc_count DESC;
""".format(where_clause)
return gbq.read_gbq(
query=cpc_counts_query,
project_id=self.bq_project,
verbose=False,
dialect='standard')
def compute_uniquely_common_cpc_codes_for_seed(self, seed_df):
'''
Queries for CPC counts across all US patents and all Seed patents, then finds the CPC codes
that are 50x more common in the Seed set than the rest of the patent corpus (and also appear in
at least 5% of Seed patents). This then returns a Pandas dataframe of uniquely common codes
as well as the table of CPC counts for reference. Note that this function makes several
BigQuery queries on multi-terabyte datasets, so expect it to take a couple minutes.
You should call this method like:
uniquely_common_cpc_codes, cpc_counts_df = \
expander.compute_uniquely_common_cpc_codes_for_seed(seed_df)
where seed_df is the result of calling load_seed_pubs() in this class.
'''
print('Querying for all US CPC Counts')
us_cpc_counts_df = self.get_cpc_counts()
print(us_cpc_counts_df.shape)
print('Querying for Seed Set CPC Counts')
seed_cpc_counts_df = self.get_cpc_counts(seed_df.PubNum)
print(seed_cpc_counts_df.shape)
print("Querying to find total number of US patents")
num_patents_df = self.bq_get_num_total_patents()
num_seed_patents = seed_df.count().values[0]
num_us_patents = num_patents_df['num_patents'].values[0]
# Merge/join the dataframes on CPC code, suffixing them as appropriate
cpc_counts_df = us_cpc_counts_df.merge(
seed_cpc_counts_df, on='code', suffixes=('_us', '_seed')) \
.sort_values(ascending=False, by=['cpc_count_seed'])
# For each CPC code, calculate the ratio of how often the code appears
# in the seed set vs the number of total seed patents
cpc_counts_df['cpc_count_to_num_seeds_ratio'] = cpc_counts_df.cpc_count_seed / num_seed_patents
# Similarly, calculate the ratio of CPC document frequencies vs total number of US patents
cpc_counts_df['cpc_count_to_num_us_ratio'] = cpc_counts_df.cpc_count_us / num_us_patents
# Calculate how much more frequently a CPC code occurs in the seed set vs full corpus of US patents
cpc_counts_df['seed_relative_freq_ratio'] = \
cpc_counts_df.cpc_count_to_num_seeds_ratio / cpc_counts_df.cpc_count_to_num_us_ratio
# We only care about codes that occur at least ~4% of the time in the seed set
# AND are 50x more common in the seed set than the full corpus of US patents
uniquely_common_cpc_codes = cpc_counts_df[
(cpc_counts_df.cpc_count_to_num_seeds_ratio >= self.min_ratio_of_code_to_seed)
&
(cpc_counts_df.seed_relative_freq_ratio >= self.min_seed_multiplier)]
return uniquely_common_cpc_codes, cpc_counts_df
def get_set_of_refs_filtered_by_country(self, seed_refs_series, country_codes):
'''
Uses the refs column of the BigQuery on the seed set to compute the set of
unique references out of the Seed set.
'''
all_relevant_refs = set()
for refs in seed_refs_series:
for ref in refs.split(','):
if self.us_only:
country_code = re.sub(r'(\w+)-(\w+)-\w+', r'\1', ref)
if country_code in country_codes:
all_relevant_refs.add(ref)
else:
all_relevant_refs.add(ref)
return all_relevant_refs
# Expansion Functions
def load_df_to_bq_tmp(self, df, tmp_table):
'''
This function inserts the provided dataframe into a temp table in BigQuery, which
is used in other parts of this class (e.g. L1 and L2 expansions) to join on by
patent number.
'''
print('Loading dataframe with cols {}, shape {}, to {}'.format(
df.columns, df.shape, tmp_table))
gbq.to_gbq(
dataframe=df,
destination_table=tmp_table,
project_id=self.bq_project,
if_exists='replace',
verbose=False)
print('Completed loading temp table.')
def expand_l2(self, refs_series):
if self.us_only:
self.load_df_to_bq_tmp(pd.DataFrame(refs_series, columns=['pub_num']), self.l2_tmp_table)
expansion_query = '''
SELECT
b.publication_number,
'L2' AS ExpansionLevel,
STRING_AGG(citations.publication_number) AS refs
FROM
`patents-public-data.patents.publications` AS b,
`{}` as tmp,
UNNEST(citation) AS citations
WHERE
(
b.publication_number = tmp.pub_num
)
AND citations.publication_number != ''
GROUP BY b.publication_number
;
'''.format(self.l2_tmp_table)
else:
self.load_df_to_bq_tmp(pd.DataFrame(refs_series, columns=['publication_number']), self.l2_tmp_table)
expansion_query = '''
SELECT
b.publication_number,
'L2' AS ExpansionLevel,
STRING_AGG(citations.publication_number) AS refs
FROM
`patents-public-data.patents.publications` AS b,
`{}` as tmp,
UNNEST(citation) AS citations
WHERE
(
b.publication_number = tmp.publication_number
)
AND citations.publication_number != ''
GROUP BY b.publication_number
;
'''.format(self.l2_tmp_table)
#print(expansion_query)
expansion_df = gbq.read_gbq(
query=expansion_query,
project_id=self.bq_project,
verbose=False,
dialect='standard')
return expansion_df
def expand_l1(self, cpc_codes_series, refs_series):
if self.us_only:
self.load_df_to_bq_tmp(pd.DataFrame(refs_series, columns=['pub_num']), self.l1_tmp_table)
else:
self.load_df_to_bq_tmp( | pd.DataFrame(refs_series, columns=['publication_number']) | pandas.DataFrame |
import numpy as np
import pandas as pd
from sklearn.base import clone
from sklearn.model_selection import KFold
def data_generator(n, noise='normal', scale=.5, underlying_relation='linear'):
x = np.linspace(start=0, stop=10, num=n)
if underlying_relation == 'linear':
y = x * 1
elif underlying_relation == 'sine_wave':
y = 2 * np.sin(x) + 5
if noise == 'normal':
y += np.random.normal(size=len(y), scale=scale)
elif noise == 'constant':
y += scale
return pd.DataFrame(dict(x=x, y=y))
def KFold_split(df_data, df_data_extra, n_splits, shuffle):
X = df_data.x.values.reshape(-1, 1)
df = pd.DataFrame(dict(x=[], y=[], data_split=[], in_train_set=[], y_pred=[]))
splitter = KFold(n_splits=n_splits, shuffle=shuffle)
for fold_nbr, (train_idx, test_idx) in enumerate(splitter.split(X)):
data_split = pd.Series(f'fold:{fold_nbr}', index=df_data.index)
in_train_set = pd.Series('out_fold', index=df_data.index)
in_train_set[train_idx] = 'in_fold'
df_fold = pd.DataFrame(dict(x=df_data.x, y=df_data.y,
data_split=data_split,
in_train_set=in_train_set, y_pred=np.full((len(df_data)), np.nan)
))
df_extra = df_data_extra.copy()
df_extra['data_split'] = pd.Series(f'fold:{fold_nbr}', index=df_extra.index)
df_extra['in_train_set'] = | pd.Series('extra_test', index=df_extra.index) | pandas.Series |
# -*- coding:utf-8 -*-
# By:<NAME>
# Create:2019-12-23
# Update:2021-10-20
# For: Scraping data from weibo and a simple, not-so-rigorous sentiment analysis based on a sentiment dictionary
import requests
import re
import os
import time
import random
from lxml import etree
from datetime import datetime, timedelta
import pandas as pd
from urllib.request import quote, unquote
from fp.fp import FreeProxy
class ScrapePosts:
def __init__(self,kw=None,cookies=None,headers=None,use_prox=True,st=None,et=None,sort="hot",cr_url=True):
self.cookies = cookies
self.headers = headers
if use_prox:
self.new_proxy()
else:
self.proxies = None
self.keyword = quote(kw, encoding='utf-8') if kw is not None else None
self.starttime = datetime.strptime(st, '%Y/%m/%d') if st is not None else None
self.endtime = datetime.strptime(et, '%Y/%m/%d') if et is not None else None
self.sort = sort
self.url = self.get_url() if cr_url else None
def new_proxy(self, rand = True):
self.proxies = FreeProxy(rand=rand).get()
def change_endtime(self,date):
self.endtime = datetime.strptime(date, '%Y/%m/%d')
self.url = self.get_url()
def change_starttime(self,date):
self.starttime = datetime.strptime(date, '%Y/%m/%d')
self.url = self.get_url()
def change_kw(self,kw):
self.keyword = quote(kw, encoding='utf-8')
self.url = self.get_url()
def change_sort(self,sort):
self.sort = sort
self.url = self.get_url()
def get_filter(self):
self.keyword = input("Please input keyword:")
self.endtime = input("Please input end time(yyyy/mm/dd):")
self.starttime = input("Please input start time(yyyy/mm/dd):")
self.sort = input("Please choose sorting method(time/hot):")
# Sometimes it's ok to just put Chinese words into the url, but it will be better to encode with URL encoding
self.keyword = quote(self.keyword, encoding='utf-8')
self.starttime = datetime.strptime(self.starttime, '%Y/%m/%d')
self.endtime = datetime.strptime(self.endtime, '%Y/%m/%d')
    # get the urls; note that "&page=" still needs to be appended to each url before fetching,
    # and the function returns a list of urls, each of which searches for the posts within one day
def get_url(self):
# default start time is Jan-01, 2010, default sort method is by time(could be by 'hot')
search_url = 'https://weibo.cn/search/mblog?hideSearchFrame='
delta = self.endtime - self.starttime + timedelta(days=1)
url = [None] * delta.days
i = 0
while i < delta.days:
url[i] = search_url + "&keyword=" + self.keyword + "&advancedfilter=1" + "&starttime=" + (
self.starttime + timedelta(days=i)).strftime('%Y%m%d') + "&endtime=" + (
self.starttime + timedelta(days=i)).strftime('%Y%m%d') + "&sort=" + self.sort
i += 1
return url
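    # A usage sketch (keyword, cookie and user-agent values are placeholders): one
    # search url is produced per day between st and et, and "&page=N" still has to
    # be appended before fetching:
    #   sp = ScrapePosts(kw='疫情', cookies='<cookie>', headers='<user-agent>',
    #                    use_prox=False, st='2020/02/01', et='2020/02/03')
    #   html = sp.get_html(sp.url[0] + "&page=1", save_html=False, use_prox=False)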
# create a tiny function to create name
def save_html(self, url, html):
ed = re.findall(r'endtime=(.*?)&', url)[0]
pg = re.findall(r'page=(.*)', url)[0]
name = '_'.join([unquote(self.keyword), ed, pg])
save = open('.//html/%s.txt' % name, "w", encoding="utf-8")
save.write('%s' % html)
save.close()
    # note that if you generate the url from the get_url function, you will need to add the "&page=" to the url
def get_html(self, url, save_html=True, use_prox=True):
        # build the request headers; you will need freshly baked cookies, which you can grab with Fiddler
headers = {
'User-Agent': self.headers,
'Cookie': self.cookies
}
if use_prox:
proxies = {
"https": self.proxies.replace("http://",""),
"http": self.proxies.replace("http://", "")
}
response = requests.get(url, headers=headers, proxies=proxies)
else:
response = requests.get(url, headers=headers)
response.encoding = "utf-8"
# to know if we successfully get the response
if response.status_code != 200:
print('\nResponse Error!')
html = response.text
if save_html:
self.save_html(url, html)
html = bytes(html, encoding='utf-8')
html = etree.HTML(html)
return html
def total_page(self, html):
try:
page = html.xpath("//div[@class='pa']//div/text()")
page = str(page)
page = int(re.findall(r'/(.*?)页', str(page))[0])
if page > 100:
page = 100
return page
except Exception as e:
            print(f'Error while getting the total page, {e}')
            return 0
def parse_html(self, html):
post_list = html.xpath("//div[@class='c'][@id]")
info_list = []
for post in post_list:
poster = post.xpath(".//div/a[@class='nk']/text()")[0]
poster_url = post.xpath(".//div/a[@class='nk']/@href")[0]
post_date = post.xpath(".//div/span[@class='ct']/text()")[0]
post_like = post.xpath(".//div/a[@href]/text()")[-4]
post_repo = post.xpath(".//div/a[@href]/text()")[-3]
post_cmt = post.xpath(".//div/a[@href]/text()")[-2]
div = post.xpath(".//div")
if len(div) == 1:
post_txt = etree.tostring(post.xpath(".//div/span[@class='ctt']")[0], encoding="unicode")
post_txt = post_txt.replace('<span class="ctt">:', '')
post_txt = post_txt.replace(f'<span class="kt">{self.keyword}</span>', self.keyword)
post_txt = post_txt.replace('</span>\xa0', '')
# Here, as above, the data we get may contain nothing or only what the last user who repoed had written
# let's just tackle it later
o_poster, o_poster_url, o_post_txt, o_post_like, o_post_repo, o_post_cmt = None, None, None, None, None, None
elif len(div) == 2:
try:
temp_post = div[1].xpath(".//text()")
post_txt = " ".join(temp_post[:len(temp_post) - 9])
except Exception as e1:
post_txt, post_like, post_repo, post_cmt = None, None, None, None
print("Error in getting repo information, error type:%s" % e1)
if div[0].xpath(".//span[@class='cmt']/a[@href]/text()"):
o_poster = div[0].xpath(".//span[@class='cmt']/a[@href]/text()")[0]
o_poster_url = div[0].xpath(".//span[@class='cmt']/a/@href")[0]
o_post_txt = etree.tostring(div[0].xpath(".//span[@class='ctt']")[0], encoding="unicode")
o_post_txt = re.sub(r'<[\w+/](.*?)[\"/\w]>', '', o_post_txt)
o_post_txt = re.sub(r'[\s]+', '', o_post_txt)
o_post_like = div[0].xpath(".//span[@class='cmt']/text()")[2]
o_post_repo = div[0].xpath(".//span[@class='cmt']/text()")[3]
o_post_cmt = div[0].xpath(".//a[@class='cc']/text()")[0]
else:
o_poster, o_poster_url, o_post_txt, o_post_like, o_post_repo, o_post_cmt = None, None, None, None, None, None
# print("Warning: this user can be posting a pic, userID is %s.\r" % poster)
elif len(div) == 3:
try:
temp_post = div[2].xpath(".//text()")
post_txt = " ".join(temp_post[:len(temp_post) - 9])
except Exception as e3:
post_txt, post_like, post_repo, post_cmt = None, None, None, None
print("Error in getting repo information, error type:%s" % e3)
o_poster = div[0].xpath(".//span[@class='cmt']/a[@href]/text()")[0]
o_poster_url = div[0].xpath(".//span[@class='cmt']/a/@href")[0]
                # here we cannot just take the text nodes, because people might have @-ed others and posted
                # hashtags, which would be eliminated if we only returned the plain text
o_post_txt = etree.tostring(div[0].xpath(".//span[@class='ctt']")[0], encoding="unicode")
o_post_txt = re.sub(r'<[\w+/](.*?)[\"/\w]>', '', o_post_txt)
o_post_txt = re.sub(r'[\s]+', '', o_post_txt)
o_post_like = div[1].xpath(".//span[@class='cmt']/text()")[0]
o_post_repo = div[1].xpath(".//span[@class='cmt']/text()")[1]
o_post_cmt = div[1].xpath(".//a[@class='cc']/text()")[0]
else:
post_txt, post_like, post_repo, post_cmt = None, None, None, None
o_poster, o_poster_url, o_post_txt, o_post_like, o_post_repo, o_post_cmt = None, None, None, None, None, None
print("Error in implement")
info = {
'user_id': poster,
'user_url': poster_url,
'post_date': post_date,
'post_content': post_txt,
'post_like': post_like,
'post_repo': post_repo,
'post_comment': post_cmt,
'original_poster_id': o_poster,
'original_poster_url': o_poster_url,
'original_post_content': o_post_txt,
'original_post_like': o_post_like,
'original_post_repo': o_post_repo,
'original_post_comment': o_post_cmt
}
info_list.append(info)
info_list = pd.DataFrame(info_list)
return (info_list)
def post_list(self, get_ttp = True,use_prox=True):
info_list = pd.DataFrame()
# from the first page, get the total page of each day and also the first html
timer = 0
for url in self.url:
timer = timer + 1
i = 1
child_url = []
child_url.append(url + "&page=1")
try:
html = self.get_html(child_url[0],use_prox=use_prox)
info = self.parse_html(html)
# save the data just in case
if not os.path.isfile("%s\Desktop\data\%s_append.csv" % (os.environ["HOMEPATH"],unquote(self.keyword))):
info.to_csv("%s\Desktop\data\%s_append.csv" % (os.environ["HOMEPATH"],unquote(self.keyword)), header=True)
else: # else it exists so append without writing the header
info.to_csv("%s\Desktop\data\%s_append.csv" % (os.environ["HOMEPATH"],unquote(self.keyword)),
mode='a', header=False)
info_list = pd.concat([info_list, info], axis=0, ignore_index=True)
# print("Great! Make it again!")
ttp = self.total_page(html) if get_ttp else 100
# sleep
time.sleep(random.uniform(1, 4))
# the second loop is to get html from each page of the day
print("Try fetch data for day {}".format(re.findall(r'endtime=(.*?)&', url)[0]))
print(' Get a cup of tea :p '.center(100 // 2, '='))
start = time.perf_counter()
while i < ttp:
i = i + 1
child_url.append(url + "&page=%s" % i)
try:
html = self.get_html(child_url[i - 1],use_prox=use_prox)
info = self.parse_html(html)
# save the data just in case
if not os.path.isfile(
"%s\Desktop\data\%s_append.csv" % (os.environ["HOMEPATH"], unquote(self.keyword))):
info.to_csv(
"%s\Desktop\data\%s_append.csv" % (os.environ["HOMEPATH"], unquote(self.keyword)),
header=True)
else: # else it exists so append without writing the header
info.to_csv(
"%s\Desktop\data\%s_append.csv" % (os.environ["HOMEPATH"], unquote(self.keyword)),
mode='a', header=False)
info_list = pd.concat([info_list, info], axis=0, ignore_index=True)
time.sleep(random.uniform(1,2))
except Exception as e:
                        print("Error in getting info list, check the PostList. Error type: %s" % e)
if use_prox:
self.new_proxy()
time.sleep(5)
a = "*" * int(50 * i / ttp)
b = '.' * int(50 * (1 - (i / ttp)))
c = i / ttp * 100
dur = time.perf_counter() - start
left = dur / i * (ttp - i) / 60
print('\r{:^3.0f}%[{}->{}] Dur: {:.2f}min; Approx {:.2f}min left'.format(c, a, b, dur / 60, left),
end='')
print('\n' + ' Grattis! Everything Works! '.center(100 // 2, '=') + '\n' + '\n')
except Exception as e:
                print("Error in getting info list, check the PostList. Error type: %s" % e)
if use_prox:
self.new_proxy()
time.sleep(5)
return info_list
    '''
    This function is written for the data scraped and stored by the 'execute' with the object extract.
    Since the scraped data are not perfect for the text analysis task, we also apply a few modifications
    to the data; after the process, the new data frames will be stored in //analyse data/data
    Note:
        the function also returns the data frames, in the order of norm_user (who post/repo the
        post), norm_user2 (who are repoed, but we don't have their url, likes, etc.), V_user (who are repoed,
        and who are mostly popular weiboers)
    '''
def divide_post(self,post_list,id_prefix):
post_list = post_list.drop_duplicates().reset_index(drop=True)
main_post = pd.DataFrame()
other_post = pd.DataFrame()
vip_post = pd.DataFrame()
print(' Get a cup of tea :p '.center(100 // 2, '='))
j = 0
start = time.perf_counter()
for i in post_list.index:
test_str = post_list['post_content'][i]
# pa is for the post we have scraped
pa_mpid = "%s%s%06d"%(id_prefix,0,i+1)
pa_uid = post_list['user_id'][i]
pa_url = post_list['user_url'][i].replace("https://weibo.cn/","")
try:
pa_time = re.findall(r"\d{2}月\d{2}日\s\d{2}:\d{2}", post_list['post_date'][i])[0]
pa_time = datetime.strptime(pa_time, '%m月%d日 %H:%M')
pa_time = pa_time.replace(year=2020)
except:
pa_time = None
try:
pa_dev = re.findall(r"来自(.*)", post_list['post_date'][i])[0]
except:
pa_dev = None
pa_like = int(re.sub("[\D]","",post_list['post_like'][i]))
pa_repo = int(re.sub("[\D]","",post_list['post_repo'][i]))
pa_cmt = int(re.sub("[\D]","",post_list['post_comment'][i]))
            # v is for the post that has been reposted, mostly written by popular posters
v_post = post_list['original_post_content'][i]
try:
v_uid = post_list['original_poster_id'][i]
v_url = post_list['original_poster_url'][i].replace("https://weibo.cn/","")
v_like = int(re.sub("[\D]","",post_list['original_post_like'][i]))
v_repo = int(re.sub("[\D]","",post_list['original_post_repo'][i]))
v_cmt = int(re.sub("[\D]","",post_list['original_post_comment'][i]))
temp_v = {
'MP_id': pa_mpid,
'OP_id': "%s%s%06d"%(id_prefix,1,i+1),
'OP_user_id': v_uid,
'OP_user_url': v_url,
'OP_content': v_post,
'OP_like': v_like,
'OP_repo': v_repo,
'OP_cmt': v_cmt
}
temp_v = pd.DataFrame(temp_v, index=[0])
vip_post = pd.concat([vip_post, temp_v], ignore_index=True, axis=0)
except:
v_url = None
# print('\rThere is no original post!')
try:
pa_post = re.findall(r'转发理由: (.*?)//', test_str)[0]
pa_post = re.sub(r'[\s]+', '', pa_post)
except:
pa_post = None
temp_main = {
'MP_id': pa_mpid,
'MP_user_id': pa_uid,
'MP_user_url': pa_url,
'MP_date': pa_time,
'MP_dev': pa_dev,
'MP_content': pa_post,
'MP_like': pa_like,
'MP_repo': pa_repo,
'MP_cmt': pa_cmt,
'OP_uer_url': v_url
}
temp_main = pd.DataFrame(temp_main, index=[0])
main_post = pd.concat([main_post, temp_main], ignore_index=True, axis=0)
ch_posts = re.split(r'[//|\xa0]', test_str)
for t_post in ch_posts:
if re.search('@', t_post) != None:
try:
ch_uid = re.findall('@(.*?)\s{0,3}:', t_post)[0],
ch_post = re.findall(':(.*)', t_post)[0]
ch_post = re.sub(r'[\s]+', '', ch_post)
temp = {
'IP_user_id': ch_uid,
'IP_content': ch_post,
'MP_user_url': pa_url,
'MP_id': pa_mpid,
'OP_user_url': v_url,
'OP_id': "%s%s%06d"%(id_prefix,1,i+1)
}
temp = pd.DataFrame(temp)
other_post = pd.concat([other_post, temp], ignore_index=True, axis=0)
except:
# print("\rThis user repo without comment!")
pass
j += 1
if j%50 == 0:
c = j / len(post_list.index)
a = "*" * int(50 * c)
b = '.' * int(50 * (1 - c))
dur = time.perf_counter() - start
left = dur / c * (1 - c) / 60
print('\r{:^3.0f}%[{}->{}] Dur: {:.2f}min; Approx {:.2f}min left'.format(c*100, a, b, dur / 60, left),
end='')
print('\n' + ' Grattis! Everything Works! '.center(100 // 2, '=') + '\n' + '\n')
return main_post, other_post, vip_post
    # given a list of post_lists from ScrapePosts.post_list, this function returns a user list
    # with only user name, user ID, and user url, with duplicates removed
def merge_data(self,user_list):
data = pd.concat(user_list, ignore_index=True, axis=0)
data = data.drop_duplicates()
d = data[['user_id', 'user_url']]
c = data[['original_poster_id', 'original_poster_url']]
c.columns = ['user_id', 'user_url']
e = pd.concat([d, c], ignore_index=True, axis=0)
e = e.drop_duplicates().dropna().reset_index(drop=True)
e = e.rename(columns={"user_id": "user_name"})
e["user_id"] = pd.DataFrame(re.sub(r"https:\/\/weibo\.cn\/u*\/*", "", x) for x in e["user_url"])
return e
    # this returns a one-row DataFrame with the parsed user profile information
def parse_user_page(self, html, uid, is_str=False):
if is_str:
html = bytes(html, encoding='utf-8')
html = etree.HTML(html)
# try to get the user id, just to make sure if we are on the right person
# and some user id get from post are not proper ids(consist of only numbers)
# so by doing this we can replace those bad ids
try:
user_id = "".join(html.xpath("//div[@class='u']//span[@class='ctt']/a/@href"))
user_id = re.findall(r"uid=(\d+)&", user_id)[0]
except:
user_id = None
user_info = "".join(html.xpath("//div[@class='u']//span[@class='ctt']/text()"))
user_name = re.sub(r'\xa0[男女].*', '', user_info)
user_gender = re.findall(r'\xa0([男女])', user_info)[0]
try:
user_city = re.sub(r"\s+", "", re.findall(r'\xa0[男女]\/(.*)\xa0', user_info)[0])
except:
user_city = None
try:
posts = html.xpath("//div[@class='tip2']/span/text()")[0]
posts = re.sub("\D+","",posts)
follows = html.xpath("//div[@class='tip2']/a[@href]/text()")[0]
follows = re.sub("\D+", "", follows)
fans = html.xpath("//div[@class='tip2']/a[@href]/text()")[1]
fans = re.sub("\D+", "", fans)
except:
posts, follows, fans = None, None, None
try:
# flag if the user is an official account
off = html.xpath("//div[@class='u']//div[@class='ut']/span[@class='ctt']/img/@src")[0]
off_lab = html.xpath("//div[@class='u']//span[@class='ctt']/text()")[2].replace("认证:", "")
except:
off = None
off_lab = None
ff = {
'user_id_s': uid, # this id is from the html source, so we use this id to go to the user profile page
'user_id': user_id,
'user_name': user_name,
'user_gender': user_gender,
'user_city': user_city,
'user_post': posts,
'user_follow': follows,
'user_fan': fans,
'user_off': off,
'user_off_lab': off_lab
}
ff = pd.DataFrame(ff,index=[0])
return ff
def user_info_list(self, user_list,file_name,use_prox=True):
print('Start Scraping'.center(100 // 2, '='))
user_info_list = pd.DataFrame()
start = time.perf_counter()
for i in user_list.index:
user_url = user_list['user_url'][i]
user_id = re.sub(r"https:\/\/weibo\.cn\/","",user_url)
# this is to get the follower-fans information
try:
# self = luvcss
# user_url = 'https://weibo.cn/mysour'
# user_id = "mysour"
html_ff = self.get_html(user_url, save_html=False,use_prox=use_prox)
ff = self.parse_user_page(html = html_ff, uid=user_id)
user_info_list = pd.concat([user_info_list,ff],axis=0,ignore_index=True)
if not os.path.isfile("%s\Desktop\data\%s_append.csv" % (os.environ["HOMEPATH"],file_name)):
ff.to_csv("%s\Desktop\data\%s_append.csv" % (os.environ["HOMEPATH"],file_name), header=True)
else: # else it exists so append without writing the header
ff.to_csv("%s\Desktop\data\%s_append.csv" % (os.environ["HOMEPATH"],file_name),
mode='a', header=False)
time.sleep(random.uniform(1, 2))
except Exception as e:
if use_prox:
self.new_proxy()
                print("Error while getting user info, details: %s" % e)
j = i+1
a = "*" * int(50 * j / len(user_list.index))
b = '.' * int(50 * (1 - (j / len(user_list.index))))
c = j / len(user_list.index) * 100
dur = time.perf_counter() - start
left = dur / j * (len(user_list.index) - j) / 60
print('\r{:^3.0f}%[{}->{}] Dur: {:.2f}min; Approx {:.2f}min left'.format(c, a, b, dur / 60, left),
end='')
print('\n' + ' Grattis! Everything Works! '.center(100 // 2, '=') + '\n' + '\n')
return user_info_list
def parse_html2(self, html):
post_list = html.xpath("//div[@class='c'][@id]")
info_list = pd.DataFrame()
for post in post_list:
poster = post.xpath(".//div/a[@class='nk']/text()")[0]
poster_url = post.xpath(".//div/a[@class='nk']/@href")[0]
try:
poster_v = post.xpath(".//div/img[@alt='V']/@src")[0]
poster_v = re.sub("https:.*\/|\.gif", "", poster_v)
except:
poster_v = None
div = post.xpath(".//div")
if len(div) == 1:
o_poster, o_poster_url, o_poster_v = None, None, None
elif len(div) == 2:
if div[0].xpath(".//span[@class='cmt']/a[@href]/text()"):
o_poster = div[0].xpath(".//span[@class='cmt']/a[@href]/text()")[0]
o_poster_url = div[0].xpath(".//span[@class='cmt']/a/@href")[0]
try:
o_poster_v = div[0].xpath(".//span[@class='cmt']/img[@alt='V']/@src")[0]
o_poster_v = re.sub("https:.*\/|\.gif", "", o_poster_v)
except:
o_poster_v = None
else:
o_poster, o_poster_url, o_poster_v = None, None, None
# print("Warning: this user can be posting a pic, userID is %s.\r" % poster)
elif len(div) == 3:
o_poster = div[0].xpath(".//span[@class='cmt']/a[@href]/text()")[0]
o_poster_url = div[0].xpath(".//span[@class='cmt']/a/@href")[0]
try:
o_poster_v = div[0].xpath(".//span[@class='cmt']/img[@alt='V']/@src")[0]
o_poster_v = re.sub("https:.*\/|\.gif", "", o_poster_v)
except:
o_poster_v = None
else:
o_poster, o_poster_url, o_poster_v = None, None, None
print("Error in implement")
info = {
'user_id': poster,
'user_url': poster_url,
'user_vtype': poster_v
}
info = pd.DataFrame(info,index=[0])
info_list = pd.concat([info_list,info],axis=0,ignore_index=True)
info = {
'user_id': o_poster,
'user_url': o_poster_url,
'user_vtype': o_poster_v
}
info = pd.DataFrame(info,index=[0])
info_list = pd.concat([info_list,info],axis=0,ignore_index=True).dropna(subset=["user_vtype"]).drop_duplicates()
info_list = info_list.drop_duplicates()
return (info_list)
# below are some independent methods
# manually parse previously saved html files and return a dataframe with the same
# structure as the output of ScrapePosts.parse_html2
def read_html(path,kw,htmls=None,error_report=False):
sp = ScrapePosts(kw=kw,cr_url=False,use_prox=False)
htmls = os.listdir(path) if htmls is None else htmls
raw = pd.DataFrame()
error = []
for i in htmls:
try:
f = open("%s\\%s"%(path,i), encoding='utf-8', mode = "r")
html = ''.join(f.readlines())
f.close()
try:
html = bytes(html, encoding='utf-8')
html = etree.HTML(html)
info = sp.parse_html2(html)
raw = pd.concat([raw, info], axis=0, ignore_index=True).drop_duplicates()
except Exception as e:
error.append(i)
print("an error while parsing the file:%s\nError:%s"%(i,e))
except:
print("an error while opening the file:%s" % i)
if error_report:
return raw, error
else:
return raw
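# A usage sketch (path and keyword are hypothetical):
#   raw_users, failed = read_html(path=r".\html", kw="疫情", error_report=True)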
# from a folder of htmls of the same topic, this function will read every html
# and return the numbers of relevant posts each day
def get_counts(path):
htmls = os.listdir(path)
counts = pd.DataFrame()
for i in htmls:
if re.search("_1\.txt",i) is not None:
kw = re.findall("^(.*)_\d{8}",i)[0]
date = datetime.strptime(re.findall("\d{8}",i)[0],"%Y%m%d").strftime("%Y/%m/%d")
f = open("%s\%s"%(path,i), encoding='utf-8', mode = "r")
html = ''.join(f.readlines())
f.close()
html = bytes(html, encoding='utf-8')
html = etree.HTML(html)
if re.search(r'抱歉,未找到(.*)相关结果。',
str(html.xpath("//div[@class='c']/text()")[0])) is not None:
count = 0
temp = {
'topic': [kw],
'date': [date],
'count': [count]
}
temp = pd.DataFrame(temp)
counts = pd.concat([counts, temp], axis=0, ignore_index=True)
else:
count = html.xpath("//div[@class='c']/span/text()")[0]
count = int(re.findall(r'共(.*?)条', str(count))[0])
temp = {
'topic': [kw],
'date': [date],
'count': [count]
}
temp = pd.DataFrame(temp)
counts = | pd.concat([counts,temp],axis=0,ignore_index=True) | pandas.concat |
import os
import random
import math
import numpy as np
import pandas as pd
import itertools
from functools import lru_cache
##########################
## Compliance functions ##
##########################
def delayed_ramp_fun(Nc_old, Nc_new, t, tau_days, l, t_start):
"""
t : timestamp
current date
tau : int
number of days before measures start having an effect
l : int
number of additional days after the time delay until full compliance is reached
"""
return Nc_old + (Nc_new-Nc_old)/l * (t-t_start-tau_days)/pd.Timedelta('1D')
def ramp_fun(Nc_old, Nc_new, t, t_start, l):
"""
t : timestamp
current date
l : int
number of additional days after the time delay until full compliance is reached
"""
return Nc_old + (Nc_new-Nc_old)/l * (t-t_start)/pd.Timedelta('1D')
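# A worked illustration of the ramp (values are made up): five days after t_start
# with l=10, the returned matrix lies halfway between Nc_old and Nc_new.
#   Nc_old, Nc_new = np.ones((9, 9)), np.zeros((9, 9))
#   ramp_fun(Nc_old, Nc_new, t=pd.Timestamp('2020-03-20'),
#            t_start=pd.Timestamp('2020-03-15'), l=10)   # every entry equals 0.5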
###############################
## Mobility update functions ##
###############################
def load_all_mobility_data(agg, dtype='fractional', beyond_borders=False):
"""
Function that fetches all available mobility data and adds it to a DataFrame with dates as indices and numpy matrices as values. Make sure to regularly update the mobility data with the notebook notebooks/preprocessing/Quick-update_mobility-matrices.ipynb to get the data for the most recent days. Also returns the average mobility over all available data, which might NOT always be desirable as a back-up mobility.
Input
-----
agg : str
Denotes the spatial aggregation at hand. Either 'prov', 'arr' or 'mun'
dtype : str
Choose the type of mobility data to return. Either 'fractional' (default), staytime (all available hours for region g spent in h), or visits (all unique visits from region g to h)
beyond_borders : boolean
If true, also include mobility abroad and mobility from foreigners
Returns
-------
all_mobility_data : pd.DataFrame
DataFrame with datetime objects as indices ('DATE') and np.arrays ('place') as value column
average_mobility_data : np.array
average mobility matrix over all available dates
"""
### Validate input ###
if agg not in ['mun', 'arr', 'prov']:
raise ValueError(
"spatial stratification '{0}' is not legitimate. Possible spatial "
"stratifications are 'mun', 'arr', or 'prov'".format(agg)
)
if dtype not in ['fractional', 'staytime', 'visits']:
raise ValueError(
"data type '{0}' is not legitimate. Possible mobility matrix "
"data types are 'fractional', 'staytime', or 'visits'".format(dtype)
)
### Load all available data ###
# Define absolute location of this file
abs_dir = os.path.dirname(__file__)
# Define data location for this particular aggregation level
data_location = f'../../../data/interim/mobility/{agg}/{dtype}'
# Iterate over all available interim mobility data
all_available_dates=[]
all_available_places=[]
directory=os.path.join(abs_dir, f'{data_location}')
for csv in os.listdir(directory):
# take YYYYMMDD information from processed CSVs. NOTE: this supposes a particular data name format!
datum = csv[-12:-4]
# Create list of datetime objects
all_available_dates.append(pd.to_datetime(datum, format="%Y%m%d"))
# Load the CSV as a np.array
if beyond_borders:
place = pd.read_csv(f'{directory}/{csv}', index_col='mllp_postalcode').values
else:
place = pd.read_csv(f'{directory}/{csv}', index_col='mllp_postalcode').drop(index='Foreigner', columns='ABROAD').values
if dtype=='fractional':
            # normalise each row so it sums to 1 again after dropping a row and a column
            place = place / place.sum(axis=1)[:, np.newaxis]
# Create list of places
all_available_places.append(place)
# Create new empty dataframe with available dates. Load mobility later
df = pd.DataFrame({'DATE' : all_available_dates, 'place' : all_available_places}).set_index('DATE')
all_mobility_data = df.copy()
# Take average of all available mobility data
average_mobility_data = df['place'].values.mean()
return all_mobility_data, average_mobility_data
class make_mobility_update_function():
"""
Output the time-dependent mobility function with the data loaded in cache
Input
-----
proximus_mobility_data : DataFrame
Pandas DataFrame with dates as indices and matrices as values. Output of mobility.get_proximus_mobility_data.
proximus_mobility_data_avg : np.array
Average mobility matrix over all matrices
"""
def __init__(self, proximus_mobility_data, proximus_mobility_data_avg):
self.proximus_mobility_data = proximus_mobility_data
self.proximus_mobility_data_avg = proximus_mobility_data_avg
@lru_cache()
# Define mobility_update_func
def __call__(self, t, default_mobility=None):
"""
time-dependent function which has a mobility matrix of type dtype for every date.
        Note: only works with datetime input (no integer time steps).
Input
-----
t : timestamp
current date as datetime object
states : str
formal necessity
param : str
formal necessity
default_mobility : np.array or None
If None (default), returns average mobility over all available dates. Else, return user-defined mobility
Returns
-------
place : np.array
square matrix with mobility of type dtype (fractional, staytime or visits), dimension depending on agg
"""
t = pd.Timestamp(t.date())
try: # if there is data available for this date (if the key exists)
place = self.proximus_mobility_data['place'][t]
except:
if default_mobility: # If there is no data available and a user-defined input is given
place = self.default_mobility
else: # No data and no user input: fall back on average mobility
place = self.proximus_mobility_data_avg
return place
def mobility_wrapper_func(self, t, states, param, default_mobility=None):
t = pd.Timestamp(t.date())
if t <= pd.Timestamp('2020-03-17'):
place = self.__call__(t, default_mobility=default_mobility)
return np.eye(place.shape[0])
else:
return self.__call__(t, default_mobility=default_mobility)
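# A usage sketch (aggregation level is assumed): wiring the cached Proximus data
# into the time-dependent mobility function.
#   all_mob, avg_mob = load_all_mobility_data(agg='prov', dtype='fractional')
#   mobility_update = make_mobility_update_function(all_mob, avg_mob)
#   P = mobility_update(pd.Timestamp('2020-11-02'))  # mobility matrix for that date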
###################
## VOC functions ##
###################
class make_VOC_function():
"""
Class that returns a time-dependant parameter function for COVID-19 SEIRD model parameter alpha (variant fraction).
Current implementation includes the alpha - delta strains.
    If the class is initialized without arguments, a logistic model fitted to prevalence data of the alpha-gamma variant is used. The class can also be initialized with the alpha-gamma prevalence data provided by Prof. <NAME>.
    A logistic model fitted to prevalence data of the delta variant is always used.
Input
-----
*df_abc: pd.dataFrame (optional)
        Alpha, Beta, Gamma prevalence dataset by <NAME>, obtained using:
`from covid19model.data import VOC`
`df_abc = VOC.get_abc_data()`
`VOC_function = make_VOC_function(df_abc)`
Output
------
__class__ : function
Default variant function
"""
def __init__(self, *df_abc):
self.df_abc = df_abc
self.data_given = False
if self.df_abc != ():
self.df_abc = df_abc[0] # First entry in list of optional arguments (dataframe)
self.data_given = True
@lru_cache()
def VOC_abc_data(self,t):
return self.df_abc.iloc[self.df_abc.index.get_loc(t, method='nearest')]['baselinesurv_f_501Y.V1_501Y.V2_501Y.V3']
@lru_cache()
def VOC_abc_logistic(self,t):
# Parameters obtained by fitting logistic model to weekly prevalence data
t_sig = pd.Timestamp('2021-02-14')
k = 0.07
# Function to return the fraction of the delta-variant
return 1/(1+np.exp(-k*(t-t_sig)/pd.Timedelta(days=1)))
@lru_cache()
def VOC_delta_logistic(self,t):
# Parameters obtained by fitting logistic model to weekly prevalence data
t_sig = pd.Timestamp('2021-06-25')
k = 0.11
# Function to return the fraction of the delta-variant
return 1/(1+np.exp(-k*(t-t_sig)/pd.Timedelta(days=1)))
# Default VOC function includes British and Indian variants
def __call__(self, t, states, param):
# Convert time to timestamp
t = pd.Timestamp(t.date())
# Introduction Indian variant
t1 = pd.Timestamp('2021-05-01')
# Construct alpha
if t <= t1:
if self.data_given:
return np.array([1-self.VOC_abc_data(t), self.VOC_abc_data(t), 0])
else:
return np.array([1-self.VOC_abc_logistic(t), self.VOC_abc_logistic(t), 0])
else:
return np.array([0, 1-self.VOC_delta_logistic(t), self.VOC_delta_logistic(t)])
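# A quick check of the delta logistic defaults (approximate): with k=0.11 and
# t_sig=2021-06-25, the delta fraction returned for 2021-06-25 is 0.5 and rises
# to roughly 0.95 about four weeks later.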
###########################
## Vaccination functions ##
###########################
from covid19model.data.model_parameters import construct_initN
class make_vaccination_function():
"""
Class that returns a two-fold time-dependent parameter function for the vaccination strategy by default. First, first dose data by sciensano are used. In the future, a hypothetical scheme is used. If spatial data is given, the output consists of vaccination data per NIS code.
Input
-----
df : pd.dataFrame
*either* Sciensano public dataset, obtained using:
`from covid19model.data import sciensano`
`df = sciensano.get_sciensano_COVID19_data(update=False)`
*or* public spatial vaccination data, obtained using:
`from covid19model.data import sciensano`
`df = sciensano.get_public_spatial_vaccination_data(update=False,agg='arr')`
spatial : Boolean
True if df is spatially explicit. None by default.
Output
------
__class__ : function
Default vaccination function
"""
def __init__(self, df, age_classes=pd.IntervalIndex.from_tuples([(0,12),(12,18),(18,25),(25,35),(35,45),(45,55),(55,65),(65,75),(75,85),(85,120)], closed='left')):
age_stratification_size = len(age_classes)
# Assign inputs to object
self.df = df
self.age_agg = age_stratification_size
# Check if spatial data is provided
self.spatial = None
if 'NIS' in self.df.index.names:
self.spatial = True
self.space_agg = len(self.df.index.get_level_values('NIS').unique().values)
# infer aggregation (prov, arr or mun)
if self.space_agg == 11:
self.agg = 'prov'
elif self.space_agg == 43:
self.agg = 'arr'
elif self.space_agg == 581:
self.agg = 'mun'
else:
                raise Exception(f"Space is {self.space_agg}-fold stratified. This is not recognized as being stratification at Belgian province, arrondissement, or municipality level.")
# Check if dose data is provided
self.doses = None
if 'dose' in self.df.index.names:
self.doses = True
self.dose_agg = len(self.df.index.get_level_values('dose').unique().values)
# Define start- and enddate
self.df_start = pd.Timestamp(self.df.index.get_level_values('date').min())
self.df_end = pd.Timestamp(self.df.index.get_level_values('date').max())
# Perform age conversion
# Define dataframe with desired format
iterables=[]
for index_name in self.df.index.names:
if index_name != 'age':
iterables += [self.df.index.get_level_values(index_name).unique()]
else:
iterables += [age_classes]
index = pd.MultiIndex.from_product(iterables, names=self.df.index.names)
self.new_df = pd.Series(index=index)
# Four possibilities exist: can this be sped up?
if self.spatial:
if self.doses:
# Shorten?
for date in self.df.index.get_level_values('date').unique():
for NIS in self.df.index.get_level_values('NIS').unique():
for dose in self.df.index.get_level_values('dose').unique():
data = self.df.loc[(date, NIS, slice(None), dose)]
self.new_df.loc[(date, NIS, slice(None), dose)] = self.convert_age_stratified_vaccination_data(data, age_classes, self.agg, NIS).values
else:
for date in self.df.index.get_level_values('date').unique():
for NIS in self.df.index.get_level_values('NIS').unique():
data = self.df.loc[(date,NIS)]
self.new_df.loc[(date, NIS)] = self.convert_age_stratified_vaccination_data(data, age_classes, self.agg, NIS).values
else:
if self.doses:
for date in self.df.index.get_level_values('date').unique():
for dose in self.df.index.get_level_values('dose').unique():
data = self.df.loc[(date, slice(None), dose)]
self.new_df.loc[(date, slice(None), dose)] = self.convert_age_stratified_vaccination_data(data, age_classes).values
else:
for date in self.df.index.get_level_values('date').unique():
data = self.df.loc[(date)]
self.new_df.loc[(date)] = self.convert_age_stratified_vaccination_data(data, age_classes).values
self.df = self.new_df
def convert_age_stratified_vaccination_data(self, data, age_classes, agg=None, NIS=None):
"""
A function to convert the sciensano vaccination data to the desired model age groups
Parameters
----------
data: pd.Series
A series of age-stratified vaccination incidences. Index must be of type pd.Intervalindex.
age_classes : pd.IntervalIndex
Desired age groups of the vaccination dataframe.
agg: str
Spatial aggregation: prov, arr or mun
NIS : str
            NIS code of the considered spatial element
Returns
-------
out: pd.Series
Converted data.
"""
# Pre-allocate new series
out = pd.Series(index = age_classes, dtype=float)
# Extract demographics
if agg:
data_n_individuals = construct_initN(data.index.get_level_values('age'), agg).loc[NIS,:].values
demographics = construct_initN(None, agg).loc[NIS,:].values
else:
data_n_individuals = construct_initN(data.index.get_level_values('age'), agg).values
demographics = construct_initN(None, agg).values
# Loop over desired intervals
for idx,interval in enumerate(age_classes):
result = []
for age in range(interval.left, interval.right):
try:
result.append(demographics[age]/data_n_individuals[data.index.get_level_values('age').contains(age)]*data.iloc[np.where(data.index.get_level_values('age').contains(age))[0][0]])
except:
result.append(0)
out.iloc[idx] = sum(result)
return out
@lru_cache()
def get_data(self,t):
if self.spatial:
if self.doses:
try:
# Only includes doses A, B and C (so not boosters!) for now
data = np.zeros([self.space_agg, self.age_agg, self.dose_agg+1])
data[:,:,:-1] = np.array(self.df.loc[t,:,:,:].values).reshape( (self.space_agg, self.age_agg, self.dose_agg) )
return data
except:
return np.zeros([self.space_agg, self.age_agg, self.dose_agg+1])
else:
try:
return np.array(self.df.loc[t,:,:].values).reshape( (self.space_agg, self.age_agg) )
except:
return np.zeros([self.space_agg, self.age_agg])
else:
if self.doses:
try:
return np.array(self.df.loc[t,:,:].values).reshape( (self.age_agg, self.dose_agg) )
except:
return np.zeros([self.age_agg, self.dose_agg])
else:
try:
return np.array(self.df.loc[t,:].values)
except:
return np.zeros(self.age_agg)
def unidose_2021_vaccination_campaign(self, states, initN, daily_doses, delay_immunity, vacc_order, stop_idx, refusal):
# Compute the number of vaccine eligible individuals
VE = states['S'] + states['R']
# Initialize N_vacc
N_vacc = np.zeros(self.age_agg)
# Start vaccination loop
idx = 0
while daily_doses > 0:
if idx == stop_idx:
daily_doses = 0 #End vaccination campaign at age 20
elif VE[vacc_order[idx]] - initN[vacc_order[idx]]*refusal[vacc_order[idx]] > daily_doses:
N_vacc[vacc_order[idx]] = daily_doses
daily_doses = 0
else:
N_vacc[vacc_order[idx]] = VE[vacc_order[idx]] - initN[vacc_order[idx]]*refusal[vacc_order[idx]]
daily_doses = daily_doses - (VE[vacc_order[idx]] - initN[vacc_order[idx]]*refusal[vacc_order[idx]])
idx = idx + 1
return N_vacc
def booster_campaign(self, states, daily_doses, vacc_order, stop_idx, refusal):
# Compute the number of booster eligible individuals
VE = states['S'][:,2] + states['E'][:,2] + states['I'][:,2] + states['A'][:,2] + states['R'][:,2] \
+ states['S'][:,3] + states['E'][:,3] + states['I'][:,3] + states['A'][:,3] + states['R'][:,3]
# Initialize N_vacc
N_vacc = np.zeros([self.age_agg,self.dose_agg])
# Booster vaccination strategy without refusal
idx = 0
while daily_doses > 0:
if idx == stop_idx:
daily_doses= 0 #End vaccination campaign at age 20
elif VE[vacc_order[idx]] - self.fully_vaccinated_0[vacc_order[idx]]*refusal[vacc_order[idx]] > daily_doses:
N_vacc[vacc_order[idx],3] = daily_doses
daily_doses= 0
else:
N_vacc[vacc_order[idx],3] = VE[vacc_order[idx]] - self.fully_vaccinated_0[vacc_order[idx]]*refusal[vacc_order[idx]]
daily_doses = daily_doses - (VE[vacc_order[idx]] - self.fully_vaccinated_0[vacc_order[idx]]*refusal[vacc_order[idx]])
idx = idx + 1
return N_vacc
# Default vaccination strategy = Sciensano data + hypothetical scheme after end of data collection for unidose model only (for now)
def __call__(self, t, states, param, initN, daily_doses=60000, delay_immunity = 21, vacc_order = [8,7,6,5,4,3,2,1,0], stop_idx=9, refusal = [0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3]):
"""
time-dependent function for the Belgian vaccination strategy
        First, all available first-dose data from Sciensano are used. Then, the user can specify a custom vaccination strategy of "daily_doses" doses per day,
administered in the order specified by the vector "vacc_order" with a refusal propensity of "refusal" in every age group.
This vaccination strategy does not distinguish between vaccination doses, individuals are transferred to the vaccination circuit after some time delay after the first dose.
For use with the model `COVID19_SEIRD` and `COVID19_SEIRD_spatial_vacc` in `~src/models/models.py`
Parameters
----------
t : int
Simulation time
states: dict
Dictionary containing values of model states
param : dict
Model parameter dictionary
initN : list or np.array
Demographics according to the epidemiological model age bins
        daily_doses : int
            Number of doses administered per day. Default is 60000 doses/day.
delay_immunity : int
Time delay between first dose vaccination and start of immunity. Default is 21 days.
vacc_order : array
Vector containing vaccination prioritization preference. Default is old to young. Must be equal in length to the number of age bins in the model.
stop_idx : float
Index of age group at which the vaccination campaign is halted. An index of 9 corresponds to vaccinating all age groups, an index of 8 corresponds to not vaccinating the age group corresponding with vacc_order[idx].
refusal: array
Vector containing the fraction of individuals refusing a vaccine per age group. Default is 30% in every age group. Must be equal in length to the number of age bins in the model.
Return
------
N_vacc : np.array
Number of individuals to be vaccinated at simulation time "t" per age, or per [patch,age]
"""
# Convert time to suitable format
t = pd.Timestamp(t.date())
# Convert delay to a timedelta
delay = pd.Timedelta(str(int(delay_immunity))+'D')
# Compute vaccinated individuals after spring-summer 2021 vaccination campaign
check_time = pd.Timestamp('2021-10-01')
# Only for non-spatial multi-vaccindation dose model
if not self.spatial:
if self.doses:
if t == check_time:
self.fully_vaccinated_0 = states['S'][:,2] + states['E'][:,2] + states['I'][:,2] + states['A'][:,2] + states['R'][:,2] + \
states['S'][:,3] + states['E'][:,3] + states['I'][:,3] + states['A'][:,3] + states['R'][:,3]
# Use data
if t <= self.df_end + delay:
return self.get_data(t-delay)
# Projection into the future
else:
if self.spatial:
if self.doses:
# No projection implemented
return np.zeros([self.space_agg, self.age_agg, self.dose_agg+1])
else:
# No projection implemented
return np.zeros([self.space_agg,self.age_agg])
else:
if self.doses:
return self.booster_campaign(states, daily_doses, vacc_order, stop_idx, refusal)
else:
return self.unidose_2021_vaccination_campaign(states, initN, daily_doses, delay_immunity, vacc_order, stop_idx, refusal)
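# A usage sketch (parameter values are illustrative and mirror the defaults): once the
# Sciensano data run out, project a campaign of 60 000 doses/day, vaccinating from old
# to young with 30% refusal in every age group. `vacc_strategy` is an instance of the
# class above.
#   N_vacc = vacc_strategy(t, states, param, initN, daily_doses=60000,
#                          vacc_order=[8,7,6,5,4,3,2,1,0], stop_idx=9,
#                          refusal=[0.3]*9)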
###################################
## Google social policy function ##
###################################
class make_contact_matrix_function():
"""
Class that returns contact matrix based on 4 prevention parameters by default, but has other policies defined as well.
Input
-----
Nc_all : dictionnary
contact matrices for home, schools, work, transport, leisure and others
df_google : dataframe
google mobility data
Output
------
__class__ : default function
Default output function, based on contact_matrix_4prev
"""
def __init__(self, df_google, Nc_all):
self.df_google = df_google.astype(float)
self.Nc_all = Nc_all
# Compute start and endtimes of dataframe
self.df_google_start = df_google.index.get_level_values('date')[0]
self.df_google_end = df_google.index.get_level_values('date')[-1]
# Check if provincial data is provided
self.provincial = None
if 'NIS' in self.df_google.index.names:
self.provincial = True
self.space_agg = len(self.df_google.index.get_level_values('NIS').unique().values)
@lru_cache() # once the function is run for a set of parameters, it doesn't need to compile again
def __call__(self, t, prev_home=1, prev_schools=1, prev_work=1, prev_rest = 1,
school=None, work=None, transport=None, leisure=None, others=None, home=None):
"""
t : timestamp
current date
prev_... : float [0,1]
prevention parameter to estimate
school, work, transport, leisure, others : float [0,1]
level of opening of these sectors
if None, it is calculated from google mobility data
only school cannot be None!
"""
if school is None:
raise ValueError(
"Please indicate to which extend schools are open")
places_var = [work, transport, leisure, others]
places_names = ['work', 'transport', 'leisure', 'others']
GCMR_names = ['work', 'transport', 'retail_recreation', 'grocery']
if self.provincial:
if t < pd.Timestamp('2020-03-17'):
return np.ones(self.space_agg)[:,np.newaxis,np.newaxis]*self.Nc_all['total']
elif pd.Timestamp('2020-03-17') <= t <= self.df_google_end:
# Extract row at timestep t
row = -self.df_google.loc[(t, slice(None)),:]/100
else:
# Extract last 14 days and take the mean
row = -self.df_google.loc[(self.df_google_end - pd.Timedelta(days=14)): self.df_google_end, slice(None)].mean(level='NIS')/100
# Sort NIS codes from low to high
row.sort_index(level='NIS', ascending=True,inplace=True)
# Extract values
values_dict={}
for idx,place in enumerate(places_var):
if place is None:
place = 1 - row[GCMR_names[idx]].values
else:
try:
test=len(place)
except:
place = place*np.ones(self.space_agg)
values_dict.update({places_names[idx]: place})
# Schools:
try:
test=len(school)
except:
school = school*np.ones(self.space_agg)
# Construct contact matrix
CM = (prev_home*np.ones(self.space_agg)[:, np.newaxis,np.newaxis]*self.Nc_all['home'] +
(prev_schools*school)[:, np.newaxis,np.newaxis]*self.Nc_all['schools'] +
(prev_work*values_dict['work'])[:,np.newaxis,np.newaxis]*self.Nc_all['work'] +
(prev_rest*values_dict['transport'])[:,np.newaxis,np.newaxis]*self.Nc_all['transport'] +
(prev_rest*values_dict['leisure'])[:,np.newaxis,np.newaxis]*self.Nc_all['leisure'] +
(prev_rest*values_dict['others'])[:,np.newaxis,np.newaxis]*self.Nc_all['others'])
else:
if t < pd.Timestamp('2020-03-17'):
return self.Nc_all['total']
elif pd.Timestamp('2020-03-17') <= t <= self.df_google_end:
# Extract row at timestep t
row = -self.df_google.loc[t]/100
else:
# Extract last 14 days and take the mean
row = -self.df_google[-14:-1].mean()/100
# Extract values
values_dict={}
for idx,place in enumerate(places_var):
if place is None:
place = 1 - row[GCMR_names[idx]]
values_dict.update({places_names[idx]: place})
# Construct contact matrix
CM = (prev_home*self.Nc_all['home'] +
prev_schools*school*self.Nc_all['schools'] +
prev_work*values_dict['work']*self.Nc_all['work'] +
prev_rest*values_dict['transport']*self.Nc_all['transport'] +
prev_rest*values_dict['leisure']*self.Nc_all['leisure'] +
prev_rest*values_dict['others']*self.Nc_all['others'])
return CM
def all_contact(self):
return self.Nc_all['total']
def all_contact_no_schools(self):
return self.Nc_all['total'] - self.Nc_all['schools']
def ramp_fun(self, Nc_old, Nc_new, t, t_start, l):
"""
t : timestamp
current simulation time
t_start : timestamp
start of policy change
l : int
number of additional days after the time delay until full compliance is reached
"""
return Nc_old + (Nc_new-Nc_old)/l * float( (t-t_start)/pd.Timedelta('1D') )
def delayed_ramp_fun(self, Nc_old, Nc_new, t, tau_days, l, t_start):
"""
t : timestamp
current simulation time
t_start : timestamp
start of policy change
        tau_days : Timedelta
            time delay before the measures start having an effect
l : int
number of additional days after the time delay until full compliance is reached
"""
return Nc_old + (Nc_new-Nc_old)/l * float( (t-t_start-tau_days)/pd.Timedelta('1D') )
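    # Worked example (illustrative numbers, not model values): with scalar contacts
    # Nc_old = 10, Nc_new = 2, a compliance window l = 5 days and t two days after t_start,
    # ramp_fun returns 10 + (2 - 10)/5 * 2 = 6.8, i.e. the linear interpolation 2/5 of the
    # way towards Nc_new; delayed_ramp_fun with tau_days = 1 day evaluates the same ramp one
    # day later, giving 10 + (2 - 10)/5 * 1 = 8.4 at that same t.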
####################
## National model ##
####################
def policies_all(self, t, states, param, l1, l2, prev_schools, prev_work, prev_rest_lockdown, prev_rest_relaxation, prev_home):
'''
        Function that returns the time-dependent social contact matrix Nc for all COVID-19 waves.
Input
-----
t : Timestamp
simulation time
states : xarray
model states
param : dict
model parameter dictionary
l1 : float
Compliance parameter for social policies during first lockdown 2020 COVID-19 wave
l2 : float
Compliance parameter for social policies during second lockdown 2020 COVID-19 wave
prev_{location} : float
Effectivity of contacts at {location}
Returns
-------
CM : np.array (9x9)
Effective contact matrix (output of __call__ function)
'''
t = pd.Timestamp(t.date())
# Convert compliance l to dates
l1_days = pd.Timedelta(l1, unit='D')
l2_days = pd.Timedelta(l2, unit='D')
# Define key dates of first wave
t1 = pd.Timestamp('2020-03-15') # start of lockdown
t2 = pd.Timestamp('2020-05-15') # gradual re-opening of schools (assume 50% of nominal scenario)
t3 = pd.Timestamp('2020-07-01') # start of summer holidays
t4 = pd.Timestamp('2020-08-03') # Summer lockdown in Antwerp
t5 = pd.Timestamp('2020-08-24') # End of summer lockdown in Antwerp
t6 = pd.Timestamp('2020-09-01') # end of summer holidays
t7 = pd.Timestamp('2020-09-21') # Opening universities
# Define key dates of second wave
t8 = pd.Timestamp('2020-10-19') # lockdown (1)
t9 = pd.Timestamp('2020-11-02') # lockdown (2)
t10 = pd.Timestamp('2020-11-16') # schools re-open
t11 = pd.Timestamp('2020-12-18') # Christmas holiday starts
t12 = pd.Timestamp('2021-01-04') # Christmas holiday ends
t13 = pd.Timestamp('2021-02-15') # Spring break starts
t14 = pd.Timestamp('2021-02-21') # Spring break ends
t15 = pd.Timestamp('2021-02-28') # Contact increase in children
t16 = pd.Timestamp('2021-03-26') # Start of Easter holiday
t17 = pd.Timestamp('2021-04-18') # End of Easter holiday
t18 = pd.Timestamp('2021-06-01') # Start of lockdown relaxation
t19 = pd.Timestamp('2021-07-01') # Start of Summer holiday
t20 = pd.Timestamp('2021-09-01') # End of Summer holiday
t21 = pd.Timestamp('2021-09-21') # Opening of universities
t22 = pd.Timestamp('2021-10-01') # Flanders releases all measures
t23 = pd.Timestamp('2021-11-01') # Start of autumn break
t24 = pd.Timestamp('2021-11-07') # End of autumn break
        t25 = pd.Timestamp('2021-12-26') # Start of Christmas break
        t26 = pd.Timestamp('2022-01-06') # End of Christmas break
t27 = pd.Timestamp('2022-02-28') # Start of Spring Break
t28 = pd.Timestamp('2022-03-06') # End of Spring Break
t29 = pd.Timestamp('2022-04-04') # Start of Easter Break
t30 = pd.Timestamp('2022-04-17') # End of Easter Break
t31 = pd.Timestamp('2022-07-01') # Start of summer holidays
t32 = pd.Timestamp('2022-09-01') # End of summer holidays
t33 = pd.Timestamp('2022-09-21') # Opening of universities
t34 = pd.Timestamp('2022-10-31') # Start of autumn break
t35 = pd.Timestamp('2022-11-06') # End of autumn break
if t <= t1:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
elif t1 < t <= t1 + l1_days:
t = pd.Timestamp(t.date())
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
policy_new = self.__call__(t, prev_home=prev_home, prev_schools=prev_schools, prev_work=prev_work, prev_rest=prev_rest_lockdown, school=0)
return self.ramp_fun(policy_old, policy_new, t, t1, l1)
elif t1 + l1_days < t <= t2:
return self.__call__(t, prev_home=prev_home, prev_schools=prev_schools, prev_work=prev_work, prev_rest=prev_rest_lockdown, school=0)
elif t2 < t <= t3:
l = (t3 - t2)/pd.Timedelta(days=1)
r = (t3 - t2)/(t4 - t2)
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown, school=0)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, r*prev_rest_relaxation, school=0)
return self.ramp_fun(policy_old, policy_new, t, t2, l)
elif t3 < t <= t4:
l = (t4 - t3)/pd.Timedelta(days=1)
r = (t3 - t2)/(t4 - t2)
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, r*prev_rest_relaxation, school=0)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0)
return self.ramp_fun(policy_old, policy_new, t, t3, l)
elif t4 < t <= t5:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown, school=0)
elif t5 < t <= t6:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0)
# Second wave
elif t6 < t <= t7:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0.7)
elif t7 < t <= t8:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
elif t8 < t <= t8 + l2_days:
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
            policy_new = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown, school=1)
return self.ramp_fun(policy_old, policy_new, t, t8, l2)
elif t8 + l2_days < t <= t9:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t9 < t <= t10:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t10 < t <= t11:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t11 < t <= t12:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t12 < t <= t13:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t13 < t <= t14:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t14 < t <= t15:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t15 < t <= t16:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t16 < t <= t17:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t17 < t <= t18:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t18 < t <= t19:
l = (t19 - t18)/pd.Timedelta(days=1)
r = (t19 - t18)/(t20 - t18)
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown, school=1)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, r*prev_rest_relaxation, school=1)
return self.ramp_fun(policy_old, policy_new, t, t18, l)
elif t19 < t <= t20:
l = (t20 - t19)/pd.Timedelta(days=1)
r = (t19 - t18)/(t20 - t18)
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, r*prev_rest_relaxation, school=0)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, 0.75*prev_rest_relaxation, school=0)
return self.ramp_fun(policy_old, policy_new, t, t19, l)
elif t20 < t <= t21:
return self.__call__(t, prev_home, prev_schools, prev_work, 0.75*prev_rest_relaxation, school=0.7)
elif t21 < t <= t22:
return self.__call__(t, prev_home, prev_schools, prev_work, 0.70*prev_rest_relaxation, school=1)
elif t22 < t <= t23:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
elif t23 < t <= t24:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0)
elif t24 < t <= t25:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
elif t25 < t <= t26:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=0.7, leisure=1.3, transport=1, others=1, school=0)
elif t26 < t <= t27:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
elif t27 < t <= t28:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
leisure=1.1, work=0.9, transport=1, others=1, school=0)
elif t28 < t <= t29:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
elif t29 < t <= t30:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=0.7, leisure=1.3, transport=1, others=1, school=0)
elif t30 < t <= t31:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
elif t31 < t <= t32:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=0.7, leisure=1.3, transport=1, others=1, school=0)
elif t32 < t <= t33:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=0.8)
elif t33 < t <= t34:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
elif t34 < t <= t35:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=0.9, leisure=1.1, transport=1, others=1, school=0)
else:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
def policies_all_WAVE4(self, t, states, param, l1, l2, prev_schools, prev_work, prev_rest_lockdown, prev_rest_relaxation, prev_home, date_measures, scenario):
'''
        Function that returns the time-dependent social contact matrix Nc for all COVID-19 waves, including the fourth-wave scenarios.
Input
-----
t : Timestamp
simulation time
states : xarray
model states
param : dict
model parameter dictionary
l1 : float
Compliance parameter for social policies during first lockdown 2020 COVID-19 wave
l2 : float
Compliance parameter for social policies during second lockdown 2020 COVID-19 wave
prev_{location} : float
            Effectivity of contacts at {location}
        date_measures : Timestamp
            start date of the additional fourth-wave measures (not referenced in the branches below)
        scenario : int
            index (0-4) selecting the assumed work/schools/leisure opening levels
Returns
-------
CM : np.array
Effective contact matrix (output of __call__ function)
'''
t = pd.Timestamp(t.date())
# Convert compliance l to dates
l1_days = pd.Timedelta(l1, unit='D')
l2_days = pd.Timedelta(l2, unit='D')
# Define key dates of first wave
t1 = pd.Timestamp('2020-03-15') # start of lockdown
t2 = pd.Timestamp('2020-05-15') # gradual re-opening of schools (assume 50% of nominal scenario)
t3 = pd.Timestamp('2020-07-01') # start of summer holidays
t4 = pd.Timestamp('2020-08-03') # Summer lockdown in Antwerp
t5 = pd.Timestamp('2020-08-24') # End of summer lockdown in Antwerp
t6 = pd.Timestamp('2020-09-01') # end of summer holidays
t7 = pd.Timestamp('2020-09-21') # Opening universities
# Define key dates of second wave
t8 = pd.Timestamp('2020-10-19') # lockdown (1)
t9 = pd.Timestamp('2020-11-02') # lockdown (2)
t10 = pd.Timestamp('2020-11-16') # schools re-open
t11 = pd.Timestamp('2020-12-18') # Christmas holiday starts
t12 = pd.Timestamp('2021-01-04') # Christmas holiday ends
t13 = pd.Timestamp('2021-02-15') # Spring break starts
t14 = pd.Timestamp('2021-02-21') # Spring break ends
t15 = pd.Timestamp('2021-02-28') # Contact increase in children
t16 = pd.Timestamp('2021-03-26') # Start of Easter holiday
t17 = pd.Timestamp('2021-04-18') # End of Easter holiday
t18 = pd.Timestamp('2021-06-01') # Start of lockdown relaxation
t19 = pd.Timestamp('2021-07-01') # Start of Summer holiday
t20 = pd.Timestamp('2021-09-01') # End of Summer holiday
t21 = pd.Timestamp('2021-09-21') # Opening of universities
t22 = pd.Timestamp('2021-10-01') # Flanders releases all measures
t23 = pd.Timestamp('2021-11-01') # Start of autumn break
t24 = pd.Timestamp('2021-11-07') # End of autumn break
# Fourth WAVE
t25 = pd.Timestamp('2021-11-22') # Start of mandatory telework + start easing in leisure restrictions
        t26 = pd.Timestamp('2021-12-18') # Start of Christmas break for schools
        t27 = pd.Timestamp('2021-12-26') # Start of Christmas break for general population
        t28 = pd.Timestamp('2022-01-06') # End of Christmas break
t29 = pd.Timestamp('2022-01-28') # End of measures
t30 = pd.Timestamp('2022-02-28') # Start of Spring Break
t31 = pd.Timestamp('2022-03-06') # End of Spring Break
t32 = pd.Timestamp('2022-04-04') # Start of Easter Break
t33 = pd.Timestamp('2022-04-17') # End of Easter Break
t34 = pd.Timestamp('2022-07-01') # Start of summer holidays
t35 = pd.Timestamp('2022-09-01') # End of summer holidays
t36 = pd.Timestamp('2022-09-21') # Opening of universities
t37 = pd.Timestamp('2022-10-31') # Start of autumn break
t38 = pd.Timestamp('2022-11-06') # End of autumn break
scenarios_work = [1, 0.7, 0.7, 0.7, 0.7]
scenarios_schools = [1, 1, 1, 1, 1]
scenarios_leisure = [1, 1, 0.75, 0.50, 0.25]
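        # the 'scenario' argument (0-4) indexes the three lists above: scenario 0 leaves work
        # and leisure fully open, scenarios 1-4 cap work at 70% and progressively reduce
        # leisure (100%, 75%, 50%, 25%), while schools stay fully open in every scenario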
if t <= t1:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
elif t1 < t <= t1 + l1_days:
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
policy_new = self.__call__(t, prev_home=prev_home, prev_schools=prev_schools, prev_work=prev_work, prev_rest=prev_rest_lockdown, school=0)
return self.ramp_fun(policy_old, policy_new, t, t1, l1)
elif t1 + l1_days < t <= t2:
return self.__call__(t, prev_home=prev_home, prev_schools=prev_schools, prev_work=prev_work, prev_rest=prev_rest_lockdown, school=0)
elif t2 < t <= t3:
l = (t3 - t2)/pd.Timedelta(days=1)
r = (t3 - t2)/(t4 - t2)
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown, school=0)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, r*prev_rest_relaxation, school=0)
return self.ramp_fun(policy_old, policy_new, t, t2, l)
elif t3 < t <= t4:
l = (t4 - t3)/pd.Timedelta(days=1)
r = (t3 - t2)/(t4 - t2)
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, r*prev_rest_relaxation, school=0)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0)
return self.ramp_fun(policy_old, policy_new, t, t3, l)
elif t4 < t <= t5:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown, school=0)
elif t5 < t <= t6:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0)
# Second wave
elif t6 < t <= t7:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0.7)
elif t7 < t <= t8:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
elif t8 < t <= t8 + l2_days:
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
            policy_new = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown, school=1)
return self.ramp_fun(policy_old, policy_new, t, t8, l2)
elif t8 + l2_days < t <= t9:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t9 < t <= t10:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t10 < t <= t11:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t11 < t <= t12:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t12 < t <= t13:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t13 < t <= t14:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t14 < t <= t15:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t15 < t <= t16:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t16 < t <= t17:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t17 < t <= t18:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t18 < t <= t19:
l = (t19 - t18)/pd.Timedelta(days=1)
r = (t19 - t18)/(t20 - t18)
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown, school=1)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, r*prev_rest_relaxation, school=1)
return self.ramp_fun(policy_old, policy_new, t, t18, l)
elif t19 < t <= t20:
l = (t20 - t19)/pd.Timedelta(days=1)
r = (t19 - t18)/(t20 - t18)
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, r*prev_rest_relaxation, school=0)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, 0.75*prev_rest_relaxation, school=0)
return self.ramp_fun(policy_old, policy_new, t, t19, l)
elif t20 < t <= t21:
return self.__call__(t, prev_home, prev_schools, prev_work, 0.75*prev_rest_relaxation, school=0.7)
elif t21 < t <= t22:
return self.__call__(t, prev_home, prev_schools, prev_work, 0.70*prev_rest_relaxation, school=1)
elif t22 < t <= t23:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
elif t23 < t <= t24:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0)
elif t24 < t <= t25:
# End of autumn break --> Date of measures
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
elif t25 < t <= t25 + pd.Timedelta(5, unit='D'):
# Date of measures --> End easing in leisure restrictions
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, work=scenarios_work[scenario], school=1)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, work=scenarios_work[scenario], leisure=scenarios_leisure[scenario], school=scenarios_schools[scenario])
return self.ramp_fun(policy_old, policy_new, t, t25, 5)
elif t25 + pd.Timedelta(5, unit='D') < t <= t26:
# End easing in leisure restrictions --> Early schools closure before Christmas holiday
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, work=scenarios_work[scenario], leisure=scenarios_leisure[scenario], school=scenarios_schools[scenario])
elif t26 < t <= t27:
# Early schools closure before Christmas holiday --> Christmas holiday
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=scenarios_work[scenario], leisure=scenarios_leisure[scenario], school=0)
elif t27 < t <= t28:
# Christmas holiday
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=scenarios_work[scenario]-0.2, leisure=scenarios_leisure[scenario], transport=scenarios_work[scenario]-0.2, school=0)
elif t28 < t <= t29:
            # Christmas holiday --> End of measures
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
leisure=scenarios_leisure[scenario], work=scenarios_work[scenario], school=1)
elif t29 < t <= t30:
# End of Measures --> Spring break
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
leisure=1, work=1, transport=1, others=1, school=1)
elif t30 < t <= t31:
# Spring Break
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=0.7, leisure=1, transport=0.7, others=1, school=0)
elif t31 < t <= t32:
# Spring Break --> Easter
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
elif t32 < t <= t33:
# Easter
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=0.7, leisure=1, transport=1, others=1, school=0)
elif t33 < t <= t34:
# Easter --> Summer
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
elif t34 < t <= t35:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=0.7, leisure=1, transport=1, others=1, school=0)
elif t35 < t <= t36:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=0.7)
elif t36 < t <= t37:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
elif t37 < t <= t38:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=0.7, leisure=1, transport=1, others=1, school=0)
else:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
###################
## Spatial model ##
###################
def policies_all_spatial(self, t, states, param, l1, l2, prev_schools, prev_work, prev_rest_lockdown, prev_rest_relaxation, prev_home):
'''
        Function that returns the time-dependent social contact matrix Nc for all COVID-19 waves.
Input
-----
t : Timestamp
simulation time
states : xarray
model states
param : dict
model parameter dictionary
l1 : float
Compliance parameter for social policies during first lockdown 2020 COVID-19 wave
l2 : float
Compliance parameter for social policies during second lockdown 2020 COVID-19 wave
prev_{location} : float
Effectivity of contacts at {location}
Returns
-------
        CM : np.array (G x 9 x 9, with G the number of spatial units)
Effective contact matrix (output of __call__ function)
'''
t = pd.Timestamp(t.date())
# Convert compliance l to dates
l1_days = pd.Timedelta(l1, unit='D')
l2_days = pd.Timedelta(l2, unit='D')
# Define key dates of first wave
t1 = pd.Timestamp('2020-03-15') # start of lockdown
t2 = pd.Timestamp('2020-05-15') # gradual re-opening of schools (assume 50% of nominal scenario)
t3 = pd.Timestamp('2020-07-01') # start of summer holidays
t4 = pd.Timestamp('2020-08-07') # Summer lockdown in Antwerp
t5 = pd.Timestamp('2020-08-24') # End of summer lockdown in Antwerp
t6 = pd.Timestamp('2020-09-01') # end of summer holidays
t7 = pd.Timestamp('2020-09-21') # Opening universities
# Define key dates of second wave
t8 = pd.Timestamp('2020-10-19') # lockdown (1)
t9 = pd.Timestamp('2020-11-02') # lockdown (2)
t10 = pd.Timestamp('2020-11-16') # schools re-open
t11 = pd.Timestamp('2020-12-18') # Christmas holiday starts
t12 = pd.Timestamp('2021-01-04') # Christmas holiday ends
t13 = pd.Timestamp('2021-02-15') # Spring break starts
t14 = pd.Timestamp('2021-02-21') # Spring break ends
t15 = pd.Timestamp('2021-02-28') # Contact increase in children
t16 = pd.Timestamp('2021-03-26') # Start of Easter holiday
t17 = pd.Timestamp('2021-04-18') # End of Easter holiday
t18 = pd.Timestamp('2021-05-07') # Start of relaxations
t19 = pd.Timestamp('2021-07-01') # Start of Summer holiday
t20 = pd.Timestamp('2021-09-01') # End of Summer holiday
t21 = pd.Timestamp('2021-09-21') # Opening of universities
t22 = pd.Timestamp('2021-11-01') # Start of autumn break
t23 = pd.Timestamp('2021-11-07') # End of autumn break
t24 = pd.Timestamp('2021-12-26') # Start of Christmass break
t25 = pd.Timestamp('2022-01-06') # End of Christmass break
t26 = pd.Timestamp('2022-02-28') # Start of Spring Break
t27 = pd.Timestamp('2022-03-06') # End of Spring Break
t28 = pd.Timestamp('2022-04-04') # Start of Easter Break
t29 = pd.Timestamp('2022-04-17') # End of Easter Break
t30 = pd.Timestamp('2022-07-01') # Start of summer holidays
t31 = pd.Timestamp('2022-09-01') # End of summer holidays
t32 = pd.Timestamp('2022-09-21') # Opening of universities
t33 = pd.Timestamp('2022-10-31') # Start of autumn break
t34 = pd.Timestamp('2022-11-06') # End of autumn break
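        # the province-specific prev_rest values below are wrapped in tuple(...), presumably so
        # that they stay hashable for the lru_cache on __call__ (a bare np.array is not hashable)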
spatial_summer_lockdown_2020 = tuple(np.array([prev_rest_lockdown, prev_rest_lockdown, # F
prev_rest_lockdown, # W
prev_rest_lockdown, # Bxl
prev_rest_lockdown, prev_rest_lockdown, # F
prev_rest_relaxation, prev_rest_relaxation, # W
prev_rest_lockdown, # F
0.7*prev_rest_relaxation, 0.7*prev_rest_relaxation])) # W
co_F = 0.60
co_W = 0.50
co_Bxl = 0.45
spatial_summer_2021 = tuple(np.array([co_F*prev_rest_relaxation, co_F*prev_rest_relaxation, # F
co_W*prev_rest_relaxation, # W
co_Bxl*prev_rest_relaxation, # Bxl
co_F*prev_rest_relaxation, co_F*prev_rest_relaxation, # F
co_W*prev_rest_relaxation, co_W*prev_rest_relaxation, # W
co_F*prev_rest_relaxation, # F
co_W*prev_rest_relaxation, co_W*prev_rest_relaxation])) # W
co_F = 1.00
co_W = 0.50
co_Bxl = 0.45
relaxation_flanders_2021 = tuple(np.array([co_F*prev_rest_relaxation, co_F*prev_rest_relaxation, # F
co_W*prev_rest_relaxation, # W
co_Bxl*prev_rest_relaxation, # Bxl
co_F*prev_rest_relaxation, co_F*prev_rest_relaxation, # F
co_W*prev_rest_relaxation, co_W*prev_rest_relaxation, # W
co_F*prev_rest_relaxation, # F
co_W*prev_rest_relaxation, co_W*prev_rest_relaxation])) # W
if t <= t1:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1) #self.Nc_all['total']
elif t1 < t <= t1 + l1_days:
t = pd.Timestamp(t.date())
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1) #self.Nc_all['total']
policy_new = self.__call__(t, prev_home=prev_home, prev_schools=prev_schools, prev_work=prev_work, prev_rest=prev_rest_lockdown, school=0)
return self.ramp_fun(policy_old, policy_new, t, t1, l1)
elif t1 + l1_days < t <= t2:
return self.__call__(t, prev_home=prev_home, prev_schools=prev_schools, prev_work=prev_work, prev_rest=prev_rest_lockdown, school=0)
elif t2 < t <= t3:
l = (t3 - t2)/pd.Timedelta(days=1)
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown, school=0)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0)
return self.ramp_fun(policy_old, policy_new, t, t2, l)
# 2020
elif t3 < t <= t4:
return self.__call__(t, prev_home=prev_home, prev_schools=prev_schools, prev_work=prev_work, prev_rest=prev_rest_relaxation, school=0)
elif t4 < t <= t5:
return self.__call__(t, prev_home, prev_schools, prev_work, spatial_summer_lockdown_2020, school=0)
elif t5 < t <= t6:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0)
# Second wave
elif t6 < t <= t7:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0.8)
elif t7 < t <= t8:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
elif t8 < t <= t8 + l2_days:
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
            policy_new = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown, school=1)
return self.ramp_fun(policy_old, policy_new, t, t8, l2)
elif t8 + l2_days < t <= t9:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t9 < t <= t10:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t10 < t <= t11:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t11 < t <= t12:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t12 < t <= t13:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t13 < t <= t14:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t14 < t <= t15:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t15 < t <= t16:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t16 < t <= t17:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t17 < t <= t18:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t18 < t <= t19:
            l = (t19 - t18)/pd.Timedelta(days=1)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created 2022
@author: <NAME>
"""
import pandas as pd
import numpy as np
import random
import itertools
from math import ceil
import time
from collections import defaultdict
#dictionary mapping each amino acid to its possible nucleotide codons
nt_aa_dict = {
'A': ['GCT', 'GCC', 'GCA', 'GCG'],
'R': ['CGT', 'CGC', 'CGA', 'CGG', 'AGA', 'AGG'],
'N': ['AAT', 'AAC'],
'D': ['GAT', 'GAC'],
'C': ['TGT', 'TGC'],
'Q': ['CAA', 'CAG'],
'E': ['GAA', 'GAG'],
'G': ['GGT', 'GGC', 'GGA', 'GGG'],
'H': ['CAT', 'CAC'],
'I': ['ATT', 'ATC', 'ATA'],
'L': ['CTT', 'CTC', 'CTA', 'CTG', 'TTA', 'TTG'],
'K': ['AAA', 'AAG'],
'M': ['ATG'],
'F': ['TTT', 'TTC'],
'P': ['CCT', 'CCC', 'CCA', 'CCG'],
'S': ['TCT', 'TCC', 'TCA', 'TCG', 'AGT', 'AGC'],
'T': ['ACT', 'ACC', 'ACA', 'ACG'],
'W': ['TGG'],
'Y': ['TAT', 'TAC'],
'V': ['GTT', 'GTC', 'GTA', 'GTG'],
'*': ['TAA', 'TGA', 'TAG']
}
def aa_to_nt(seq_list, target_list, aug_factor):
'''
Function which produces aug_factor unique nucleotide sequences for each amino acid sequence in
seq_list. The appropriate targets (labels) are maintained for the augmented sequences.
Parameters
----------
seq_list : list or pandas.core.series.Series
list or series of amino acid sequences
target_list : list or pandas.core.series.Series
list or series of 'targets' or class labels corresponding to the sequences
aug_factor : int
the augmentation factor. the number of unique nucleotide sequences to create per protein sequence
Returns
-------
    seq_dict : dict
        dictionary mapping each input sequence index to its augmented nucleotide sequences
    target_dict : dict
        dictionary mapping each input sequence index to the class labels of those sequences
'''
seq_dict = {}
target_dict = {}
for k in range(len(seq_list)):
seq_dict[k] = []
nt_codons_per_residue = {}
for i in range(len(seq_list[k])):
#determine possible nt codons per aa position
nt_codons_per_residue[str(i)] = nt_aa_dict[seq_list[k][i]]
        #enumerate combinations of nt codons for the given aa seq lazily with itertools.product and keep only the first aug_factor of them
nucleotides = list(itertools.islice(itertools.product(*nt_codons_per_residue.values()), aug_factor))
#convert list of tuples to list of strings
nucleotides = list(map(''.join,nucleotides))
tmp_target_list = []
for j in range(len(nucleotides)):
tmp_target_list.append(target_list[k])
seq_dict[k] = (nucleotides)
target_dict[k] = tmp_target_list
return seq_dict, target_dict
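# Illustrative example (not part of the original module):
#   aa_to_nt(['MK'], [1], aug_factor=2)
# back-translates the dipeptide 'MK' (M -> ATG; K -> AAA or AAG) into its first two codon
# combinations and returns ({0: ['ATGAAA', 'ATGAAG']}, {0: [1, 1]}), duplicating the class
# label 1 once per generated nucleotide sequence.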
def nt_augmentation(input_seqs, final_data_len = 2e5, is_val_set = False):
'''
    Wrapper function to set up nucleotide augmentation based on a desired augmented data length. If
is_val_set = True, then sequences will be backtranslated (from amino acids to nucleotides) without
augmentation
Parameters
----------
input_seqs : list or pandas.core.series.Series
list or series of amino acid sequences
final_data_len : int
desired length of final data set
is_val_set : bool
whether or not input_seqs is a validation set. If is_val_set = True, backtranslation without
augmentation is performed.
Returns
-------
out_df : pandas.core.frame.DataFrame
pandas dataframe containing augmented nucleotide sequences
'''
data_len = len(input_seqs)
    #round the calculated augmentation factor up to a whole number
    #this augments the data more than necessary (e.g. a desired factor of 1.5 becomes 2x); the surplus is removed by downsampling below
calculated_aug_factor = int(ceil(final_data_len/data_len))
if calculated_aug_factor == 0 or is_val_set == True: calculated_aug_factor = 1
    #for her2 negatives, augmenting by 2 then subsampling will decrease the sequence diversity of the majority class
#essentially acting as a majority class downsampling effect
#elif is_val_set =='her2_neg': calculated_aug_factor = 2
data = input_seqs.copy()
aa_seq_list = data['aaseq'].to_list()
target_list = data['target'].to_list()
seq_dict, target_dict = aa_to_nt(aa_seq_list, target_list = target_list, aug_factor = calculated_aug_factor)
#randomly downsample augmented data set to desired length
if is_val_set == False:
truncate_factor = final_data_len/data_len
len_seq_dict = sum([len(x) for x in seq_dict.values()]) #number of total nucleotide sequences in dictionary
#downsample augmented sequences by iteratively dropping one augmented nt seq from each
#aa seq until desired data size is reached
if final_data_len < len_seq_dict:
num_seqs_to_drop = int(len_seq_dict - final_data_len)
for i in range(num_seqs_to_drop):
seq_dict[i] = np.random.choice(seq_dict[i], len(seq_dict[i]) -1, replace=False)
target_dict[i] = np.random.choice(target_dict[i], len(target_dict[i]) -1, replace=False)
seq_out_list = []
target_out_list = []
for key in seq_dict:
for seq_entry in seq_dict[key]:
seq_out_list.append(seq_entry)
for target_entry in target_dict[key]:
target_out_list.append(target_entry)
out_df = | pd.DataFrame(seq_out_list) | pandas.DataFrame |
"""
Module contains tools for processing files into DataFrames or other objects
"""
from __future__ import annotations
from collections import abc
import csv
import sys
from textwrap import fill
from typing import Any
import warnings
import numpy as np
import pandas._libs.lib as lib
from pandas._libs.parsers import STR_NA_VALUES
from pandas._typing import (
ArrayLike,
DtypeArg,
FilePathOrBuffer,
StorageOptions,
)
from pandas.errors import (
AbstractMethodError,
ParserWarning,
)
from pandas.util._decorators import (
Appender,
deprecate_nonkeyword_arguments,
)
from pandas.util._validators import validate_bool_kwarg
from pandas.core.dtypes.common import (
is_file_like,
is_float,
is_integer,
is_list_like,
)
from pandas.core import generic
from pandas.core.frame import DataFrame
from pandas.core.indexes.api import RangeIndex
from pandas.io.common import validate_header_arg
from pandas.io.parsers.base_parser import (
ParserBase,
is_index_col,
parser_defaults,
)
from pandas.io.parsers.c_parser_wrapper import CParserWrapper
from pandas.io.parsers.python_parser import (
FixedWidthFieldParser,
PythonParser,
)
_doc_read_csv_and_table = (
r"""
{summary}
Also supports optionally iterating or breaking of the file
into chunks.
Additional help can be found in the online docs for
`IO Tools <https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html>`_.
Parameters
----------
filepath_or_buffer : str, path object or file-like object
Any valid string path is acceptable. The string could be a URL. Valid
URL schemes include http, ftp, s3, gs, and file. For file URLs, a host is
expected. A local file could be: file://localhost/path/to/table.csv.
If you want to pass in a path object, pandas accepts any ``os.PathLike``.
By file-like object, we refer to objects with a ``read()`` method, such as
a file handle (e.g. via builtin ``open`` function) or ``StringIO``.
sep : str, default {_default_sep}
Delimiter to use. If sep is None, the C engine cannot automatically detect
the separator, but the Python parsing engine can, meaning the latter will
be used and automatically detect the separator by Python's builtin sniffer
tool, ``csv.Sniffer``. In addition, separators longer than 1 character and
different from ``'\s+'`` will be interpreted as regular expressions and
will also force the use of the Python parsing engine. Note that regex
delimiters are prone to ignoring quoted data. Regex example: ``'\r\t'``.
delimiter : str, default ``None``
Alias for sep.
header : int, list of int, default 'infer'
Row number(s) to use as the column names, and the start of the
data. Default behavior is to infer the column names: if no names
are passed the behavior is identical to ``header=0`` and column
names are inferred from the first line of the file, if column
names are passed explicitly then the behavior is identical to
``header=None``. Explicitly pass ``header=0`` to be able to
replace existing names. The header can be a list of integers that
specify row locations for a multi-index on the columns
e.g. [0,1,3]. Intervening rows that are not specified will be
skipped (e.g. 2 in this example is skipped). Note that this
parameter ignores commented lines and empty lines if
``skip_blank_lines=True``, so ``header=0`` denotes the first line of
data rather than the first line of the file.
names : array-like, optional
List of column names to use. If the file contains a header row,
then you should explicitly pass ``header=0`` to override the column names.
Duplicates in this list are not allowed.
index_col : int, str, sequence of int / str, or False, default ``None``
Column(s) to use as the row labels of the ``DataFrame``, either given as
string name or column index. If a sequence of int / str is given, a
MultiIndex is used.
Note: ``index_col=False`` can be used to force pandas to *not* use the first
column as the index, e.g. when you have a malformed file with delimiters at
the end of each line.
usecols : list-like or callable, optional
Return a subset of the columns. If list-like, all elements must either
be positional (i.e. integer indices into the document columns) or strings
that correspond to column names provided either by the user in `names` or
inferred from the document header row(s). For example, a valid list-like
`usecols` parameter would be ``[0, 1, 2]`` or ``['foo', 'bar', 'baz']``.
Element order is ignored, so ``usecols=[0, 1]`` is the same as ``[1, 0]``.
To instantiate a DataFrame from ``data`` with element order preserved use
``pd.read_csv(data, usecols=['foo', 'bar'])[['foo', 'bar']]`` for columns
in ``['foo', 'bar']`` order or
``pd.read_csv(data, usecols=['foo', 'bar'])[['bar', 'foo']]``
for ``['bar', 'foo']`` order.
If callable, the callable function will be evaluated against the column
names, returning names where the callable function evaluates to True. An
example of a valid callable argument would be ``lambda x: x.upper() in
['AAA', 'BBB', 'DDD']``. Using this parameter results in much faster
parsing time and lower memory usage.
squeeze : bool, default False
If the parsed data only contains one column then return a Series.
prefix : str, optional
Prefix to add to column numbers when no header, e.g. 'X' for X0, X1, ...
mangle_dupe_cols : bool, default True
Duplicate columns will be specified as 'X', 'X.1', ...'X.N', rather than
'X'...'X'. Passing in False will cause data to be overwritten if there
are duplicate names in the columns.
dtype : Type name or dict of column -> type, optional
Data type for data or columns. E.g. {{'a': np.float64, 'b': np.int32,
'c': 'Int64'}}
Use `str` or `object` together with suitable `na_values` settings
to preserve and not interpret dtype.
If converters are specified, they will be applied INSTEAD
of dtype conversion.
engine : {{'c', 'python'}}, optional
Parser engine to use. The C engine is faster while the python engine is
currently more feature-complete.
converters : dict, optional
Dict of functions for converting values in certain columns. Keys can either
be integers or column labels.
true_values : list, optional
Values to consider as True.
false_values : list, optional
Values to consider as False.
skipinitialspace : bool, default False
Skip spaces after delimiter.
skiprows : list-like, int or callable, optional
Line numbers to skip (0-indexed) or number of lines to skip (int)
at the start of the file.
If callable, the callable function will be evaluated against the row
indices, returning True if the row should be skipped and False otherwise.
An example of a valid callable argument would be ``lambda x: x in [0, 2]``.
skipfooter : int, default 0
Number of lines at bottom of file to skip (Unsupported with engine='c').
nrows : int, optional
Number of rows of file to read. Useful for reading pieces of large files.
na_values : scalar, str, list-like, or dict, optional
Additional strings to recognize as NA/NaN. If dict passed, specific
per-column NA values. By default the following values are interpreted as
NaN: '"""
+ fill("', '".join(sorted(STR_NA_VALUES)), 70, subsequent_indent=" ")
+ """'.
keep_default_na : bool, default True
Whether or not to include the default NaN values when parsing the data.
Depending on whether `na_values` is passed in, the behavior is as follows:
* If `keep_default_na` is True, and `na_values` are specified, `na_values`
is appended to the default NaN values used for parsing.
* If `keep_default_na` is True, and `na_values` are not specified, only
the default NaN values are used for parsing.
* If `keep_default_na` is False, and `na_values` are specified, only
the NaN values specified `na_values` are used for parsing.
* If `keep_default_na` is False, and `na_values` are not specified, no
strings will be parsed as NaN.
Note that if `na_filter` is passed in as False, the `keep_default_na` and
`na_values` parameters will be ignored.
na_filter : bool, default True
Detect missing value markers (empty strings and the value of na_values). In
data without any NAs, passing na_filter=False can improve the performance
of reading a large file.
verbose : bool, default False
Indicate number of NA values placed in non-numeric columns.
skip_blank_lines : bool, default True
If True, skip over blank lines rather than interpreting as NaN values.
parse_dates : bool or list of int or names or list of lists or dict, \
default False
The behavior is as follows:
* boolean. If True -> try parsing the index.
* list of int or names. e.g. If [1, 2, 3] -> try parsing columns 1, 2, 3
each as a separate date column.
* list of lists. e.g. If [[1, 3]] -> combine columns 1 and 3 and parse as
a single date column.
* dict, e.g. {{'foo' : [1, 3]}} -> parse columns 1, 3 as date and call
result 'foo'
If a column or index cannot be represented as an array of datetimes,
say because of an unparsable value or a mixture of timezones, the column
or index will be returned unaltered as an object data type. For
non-standard datetime parsing, use ``pd.to_datetime`` after
``pd.read_csv``. To parse an index or column with a mixture of timezones,
specify ``date_parser`` to be a partially-applied
:func:`pandas.to_datetime` with ``utc=True``. See
:ref:`io.csv.mixed_timezones` for more.
Note: A fast-path exists for iso8601-formatted dates.
infer_datetime_format : bool, default False
If True and `parse_dates` is enabled, pandas will attempt to infer the
format of the datetime strings in the columns, and if it can be inferred,
switch to a faster method of parsing them. In some cases this can increase
the parsing speed by 5-10x.
keep_date_col : bool, default False
If True and `parse_dates` specifies combining multiple columns then
keep the original columns.
date_parser : function, optional
Function to use for converting a sequence of string columns to an array of
datetime instances. The default uses ``dateutil.parser.parser`` to do the
conversion. Pandas will try to call `date_parser` in three different ways,
advancing to the next if an exception occurs: 1) Pass one or more arrays
(as defined by `parse_dates`) as arguments; 2) concatenate (row-wise) the
string values from the columns defined by `parse_dates` into a single array
and pass that; and 3) call `date_parser` once for each row using one or
more strings (corresponding to the columns defined by `parse_dates`) as
arguments.
dayfirst : bool, default False
DD/MM format dates, international and European format.
cache_dates : bool, default True
If True, use a cache of unique, converted dates to apply the datetime
conversion. May produce significant speed-up when parsing duplicate
date strings, especially ones with timezone offsets.
.. versionadded:: 0.25.0
iterator : bool, default False
Return TextFileReader object for iteration or getting chunks with
``get_chunk()``.
.. versionchanged:: 1.2
``TextFileReader`` is a context manager.
chunksize : int, optional
Return TextFileReader object for iteration.
See the `IO Tools docs
<https://pandas.pydata.org/pandas-docs/stable/io.html#io-chunking>`_
for more information on ``iterator`` and ``chunksize``.
.. versionchanged:: 1.2
``TextFileReader`` is a context manager.
compression : {{'infer', 'gzip', 'bz2', 'zip', 'xz', None}}, default 'infer'
For on-the-fly decompression of on-disk data. If 'infer' and
`filepath_or_buffer` is path-like, then detect compression from the
following extensions: '.gz', '.bz2', '.zip', or '.xz' (otherwise no
decompression). If using 'zip', the ZIP file must contain only one data
file to be read in. Set to None for no decompression.
thousands : str, optional
Thousands separator.
decimal : str, default '.'
Character to recognize as decimal point (e.g. use ',' for European data).
lineterminator : str (length 1), optional
Character to break file into lines. Only valid with C parser.
quotechar : str (length 1), optional
The character used to denote the start and end of a quoted item. Quoted
items can include the delimiter and it will be ignored.
quoting : int or csv.QUOTE_* instance, default 0
Control field quoting behavior per ``csv.QUOTE_*`` constants. Use one of
QUOTE_MINIMAL (0), QUOTE_ALL (1), QUOTE_NONNUMERIC (2) or QUOTE_NONE (3).
doublequote : bool, default ``True``
When quotechar is specified and quoting is not ``QUOTE_NONE``, indicate
whether or not to interpret two consecutive quotechar elements INSIDE a
field as a single ``quotechar`` element.
escapechar : str (length 1), optional
One-character string used to escape other characters.
comment : str, optional
Indicates remainder of line should not be parsed. If found at the beginning
of a line, the line will be ignored altogether. This parameter must be a
single character. Like empty lines (as long as ``skip_blank_lines=True``),
fully commented lines are ignored by the parameter `header` but not by
`skiprows`. For example, if ``comment='#'``, parsing
``#empty\\na,b,c\\n1,2,3`` with ``header=0`` will result in 'a,b,c' being
treated as the header.
encoding : str, optional
Encoding to use for UTF when reading/writing (ex. 'utf-8'). `List of Python
standard encodings
<https://docs.python.org/3/library/codecs.html#standard-encodings>`_ .
.. versionchanged:: 1.2
When ``encoding`` is ``None``, ``errors="replace"`` is passed to
``open()``. Otherwise, ``errors="strict"`` is passed to ``open()``.
This behavior was previously only the case for ``engine="python"``.
.. versionchanged:: 1.3.0
``encoding_errors`` is a new argument. ``encoding`` has no longer an
influence on how encoding errors are handled.
encoding_errors : str, optional, default "strict"
How encoding errors are treated. `List of possible values
<https://docs.python.org/3/library/codecs.html#error-handlers>`_ .
.. versionadded:: 1.3.0
dialect : str or csv.Dialect, optional
If provided, this parameter will override values (default or not) for the
following parameters: `delimiter`, `doublequote`, `escapechar`,
`skipinitialspace`, `quotechar`, and `quoting`. If it is necessary to
override values, a ParserWarning will be issued. See csv.Dialect
documentation for more details.
error_bad_lines : bool, default ``None``
Lines with too many fields (e.g. a csv line with too many commas) will by
default cause an exception to be raised, and no DataFrame will be returned.
If False, then these "bad lines" will be dropped from the DataFrame that is
returned.
.. deprecated:: 1.3.0
The ``on_bad_lines`` parameter should be used instead to specify behavior upon
encountering a bad line instead.
warn_bad_lines : bool, default ``None``
If error_bad_lines is False, and warn_bad_lines is True, a warning for each
"bad line" will be output.
.. deprecated:: 1.3.0
The ``on_bad_lines`` parameter should be used instead to specify behavior upon
encountering a bad line instead.
on_bad_lines : {{'error', 'warn', 'skip'}}, default 'error'
Specifies what to do upon encountering a bad line (a line with too many fields).
Allowed values are :
- 'error', raise an Exception when a bad line is encountered.
- 'warn', raise a warning when a bad line is encountered and skip that line.
- 'skip', skip bad lines without raising or warning when they are encountered.
.. versionadded:: 1.3.0
delim_whitespace : bool, default False
Specifies whether or not whitespace (e.g. ``' '`` or ``'\t'``) will be
used as the sep. Equivalent to setting ``sep='\\s+'``. If this option
is set to True, nothing should be passed in for the ``delimiter``
parameter.
low_memory : bool, default True
Internally process the file in chunks, resulting in lower memory use
while parsing, but possibly mixed type inference. To ensure no mixed
types either set False, or specify the type with the `dtype` parameter.
Note that the entire file is read into a single DataFrame regardless,
use the `chunksize` or `iterator` parameter to return the data in chunks.
(Only valid with C parser).
memory_map : bool, default False
If a filepath is provided for `filepath_or_buffer`, map the file object
directly onto memory and access the data directly from there. Using this
option can improve performance because there is no longer any I/O overhead.
float_precision : str, optional
Specifies which converter the C engine should use for floating-point
values. The options are ``None`` or 'high' for the ordinary converter,
'legacy' for the original lower precision pandas converter, and
'round_trip' for the round-trip converter.
.. versionchanged:: 1.2
{storage_options}
.. versionadded:: 1.2
Returns
-------
DataFrame or TextParser
A comma-separated values (csv) file is returned as two-dimensional
data structure with labeled axes.
See Also
--------
DataFrame.to_csv : Write DataFrame to a comma-separated values (csv) file.
read_csv : Read a comma-separated values (csv) file into DataFrame.
read_fwf : Read a table of fixed-width formatted lines into DataFrame.
Examples
--------
>>> pd.{func_name}('data.csv') # doctest: +SKIP
"""
)
_c_parser_defaults = {
"delim_whitespace": False,
"na_filter": True,
"low_memory": True,
"memory_map": False,
"error_bad_lines": None,
"warn_bad_lines": None,
"float_precision": None,
}
_fwf_defaults = {"colspecs": "infer", "infer_nrows": 100, "widths": None}
_c_unsupported = {"skipfooter"}
_python_unsupported = {"low_memory", "float_precision"}
_deprecated_defaults: dict[str, Any] = {"error_bad_lines": None, "warn_bad_lines": None}
_deprecated_args: set[str] = {"error_bad_lines", "warn_bad_lines"}
def validate_integer(name, val, min_val=0):
"""
Checks whether the 'name' parameter for parsing is either
an integer OR float that can SAFELY be cast to an integer
without losing accuracy. Raises a ValueError if that is
not the case.
Parameters
----------
name : str
Parameter name (used for error reporting)
val : int or float
The value to check
min_val : int
Minimum allowed value (val < min_val will result in a ValueError)
"""
msg = f"'{name:s}' must be an integer >={min_val:d}"
if val is not None:
if is_float(val):
if int(val) != val:
raise ValueError(msg)
val = int(val)
elif not (is_integer(val) and val >= min_val):
raise ValueError(msg)
return val
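# For example (illustrative): validate_integer("nrows", 5.0) returns 5, while
# validate_integer("nrows", 5.5) or validate_integer("nrows", -1) raises ValueError;
# None is passed through unchanged.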
def _validate_names(names):
"""
Raise ValueError if the `names` parameter contains duplicates or has an
invalid data type.
Parameters
----------
names : array-like or None
An array containing a list of the names used for the output DataFrame.
Raises
------
ValueError
If names are not unique or are not ordered (e.g. set).
"""
if names is not None:
if len(names) != len(set(names)):
raise ValueError("Duplicate names are not allowed.")
if not (
is_list_like(names, allow_sets=False) or isinstance(names, abc.KeysView)
):
raise ValueError("Names should be an ordered collection.")
def _read(filepath_or_buffer: FilePathOrBuffer, kwds):
"""Generic reader of line files."""
if kwds.get("date_parser", None) is not None:
if isinstance(kwds["parse_dates"], bool):
kwds["parse_dates"] = True
# Extract some of the arguments (pass chunksize on).
iterator = kwds.get("iterator", False)
chunksize = validate_integer("chunksize", kwds.get("chunksize", None), 1)
nrows = kwds.get("nrows", None)
# Check for duplicates in names.
_validate_names(kwds.get("names", None))
# Create the parser.
parser = TextFileReader(filepath_or_buffer, **kwds)
if chunksize or iterator:
return parser
with parser:
return parser.read(nrows)
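# Illustrative note: when kwds carries "iterator" or a validated "chunksize", _read returns the
# TextFileReader itself (a context manager that yields chunks); otherwise it reads up to "nrows"
# rows inside a with-block and returns the resulting DataFrame.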
@deprecate_nonkeyword_arguments(
version=None, allowed_args=["filepath_or_buffer"], stacklevel=3
)
@Appender(
_doc_read_csv_and_table.format(
func_name="read_csv",
summary="Read a comma-separated values (csv) file into DataFrame.",
_default_sep="','",
storage_options=generic._shared_docs["storage_options"],
)
)
def read_csv(
filepath_or_buffer: FilePathOrBuffer,
sep=lib.no_default,
delimiter=None,
# Column and Index Locations and Names
header="infer",
names=lib.no_default,
index_col=None,
usecols=None,
squeeze=False,
prefix=lib.no_default,
mangle_dupe_cols=True,
# General Parsing Configuration
dtype: DtypeArg | None = None,
engine=None,
converters=None,
true_values=None,
false_values=None,
skipinitialspace=False,
skiprows=None,
skipfooter=0,
nrows=None,
# NA and Missing Data Handling
na_values=None,
keep_default_na=True,
na_filter=True,
verbose=False,
skip_blank_lines=True,
# Datetime Handling
parse_dates=False,
infer_datetime_format=False,
keep_date_col=False,
date_parser=None,
dayfirst=False,
cache_dates=True,
# Iteration
iterator=False,
chunksize=None,
# Quoting, Compression, and File Format
compression="infer",
thousands=None,
decimal: str = ".",
lineterminator=None,
quotechar='"',
quoting=csv.QUOTE_MINIMAL,
doublequote=True,
escapechar=None,
comment=None,
encoding=None,
encoding_errors: str | None = "strict",
dialect=None,
# Error Handling
error_bad_lines=None,
warn_bad_lines=None,
# TODO (2.0): set on_bad_lines to "error".
# See _refine_defaults_read comment for why we do this.
on_bad_lines=None,
# Internal
delim_whitespace=False,
low_memory=_c_parser_defaults["low_memory"],
memory_map=False,
float_precision=None,
storage_options: StorageOptions = None,
):
# locals() should never be modified
kwds = locals().copy()
del kwds["filepath_or_buffer"]
del kwds["sep"]
kwds_defaults = _refine_defaults_read(
dialect,
delimiter,
delim_whitespace,
engine,
sep,
error_bad_lines,
warn_bad_lines,
on_bad_lines,
names,
prefix,
defaults={"delimiter": ","},
)
kwds.update(kwds_defaults)
return _read(filepath_or_buffer, kwds)
@deprecate_nonkeyword_arguments(
version=None, allowed_args=["filepath_or_buffer"], stacklevel=3
)
@Appender(
_doc_read_csv_and_table.format(
func_name="read_table",
summary="Read general delimited file into DataFrame.",
_default_sep=r"'\\t' (tab-stop)",
storage_options=generic._shared_docs["storage_options"],
)
)
def read_table(
filepath_or_buffer: FilePathOrBuffer,
sep=lib.no_default,
delimiter=None,
# Column and Index Locations and Names
header="infer",
names=lib.no_default,
index_col=None,
usecols=None,
squeeze=False,
prefix=lib.no_default,
mangle_dupe_cols=True,
# General Parsing Configuration
dtype: DtypeArg | None = None,
engine=None,
converters=None,
true_values=None,
false_values=None,
skipinitialspace=False,
skiprows=None,
skipfooter=0,
nrows=None,
# NA and Missing Data Handling
na_values=None,
keep_default_na=True,
na_filter=True,
verbose=False,
skip_blank_lines=True,
# Datetime Handling
parse_dates=False,
infer_datetime_format=False,
keep_date_col=False,
date_parser=None,
dayfirst=False,
cache_dates=True,
# Iteration
iterator=False,
chunksize=None,
# Quoting, Compression, and File Format
compression="infer",
thousands=None,
decimal: str = ".",
lineterminator=None,
quotechar='"',
quoting=csv.QUOTE_MINIMAL,
doublequote=True,
escapechar=None,
comment=None,
encoding=None,
dialect=None,
# Error Handling
error_bad_lines=None,
warn_bad_lines=None,
# TODO (2.0): set on_bad_lines to "error".
# See _refine_defaults_read comment for why we do this.
on_bad_lines=None,
encoding_errors: str | None = "strict",
# Internal
delim_whitespace=False,
low_memory=_c_parser_defaults["low_memory"],
memory_map=False,
float_precision=None,
):
# locals() should never be modified
kwds = locals().copy()
del kwds["filepath_or_buffer"]
del kwds["sep"]
kwds_defaults = _refine_defaults_read(
dialect,
delimiter,
delim_whitespace,
engine,
sep,
error_bad_lines,
warn_bad_lines,
on_bad_lines,
names,
prefix,
defaults={"delimiter": "\t"},
)
kwds.update(kwds_defaults)
return _read(filepath_or_buffer, kwds)
def read_fwf(
filepath_or_buffer: FilePathOrBuffer,
colspecs="infer",
widths=None,
infer_nrows=100,
**kwds,
):
r"""
Read a table of fixed-width formatted lines into DataFrame.
Also supports optionally iterating or breaking of the file
into chunks.
Additional help can be found in the `online docs for IO Tools
<https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html>`_.
Parameters
----------
filepath_or_buffer : str, path object or file-like object
Any valid string path is acceptable. The string could be a URL. Valid
URL schemes include http, ftp, s3, and file. For file URLs, a host is
expected. A local file could be:
``file://localhost/path/to/table.csv``.
If you want to pass in a path object, pandas accepts any
``os.PathLike``.
By file-like object, we refer to objects with a ``read()`` method,
such as a file handle (e.g. via builtin ``open`` function)
or ``StringIO``.
colspecs : list of tuple (int, int) or 'infer'. optional
A list of tuples giving the extents of the fixed-width
fields of each line as half-open intervals (i.e., [from, to[ ).
String value 'infer' can be used to instruct the parser to try
detecting the column specifications from the first 100 rows of
the data which are not being skipped via skiprows (default='infer').
widths : list of int, optional
A list of field widths which can be used instead of 'colspecs' if
the intervals are contiguous.
infer_nrows : int, default 100
The number of rows to consider when letting the parser determine the
`colspecs`.
**kwds : optional
Optional keyword arguments can be passed to ``TextFileReader``.
Returns
-------
DataFrame or TextParser
A comma-separated values (csv) file is returned as two-dimensional
data structure with labeled axes.
See Also
--------
DataFrame.to_csv : Write DataFrame to a comma-separated values (csv) file.
read_csv : Read a comma-separated values (csv) file into DataFrame.
Examples
--------
>>> pd.read_fwf('data.csv') # doctest: +SKIP
"""
# Check input arguments.
if colspecs is None and widths is None:
raise ValueError("Must specify either colspecs or widths")
elif colspecs not in (None, "infer") and widths is not None:
raise ValueError("You must specify only one of 'widths' and 'colspecs'")
# Compute 'colspecs' from 'widths', if specified.
if widths is not None:
colspecs, col = [], 0
for w in widths:
colspecs.append((col, col + w))
col += w
kwds["colspecs"] = colspecs
kwds["infer_nrows"] = infer_nrows
kwds["engine"] = "python-fwf"
return _read(filepath_or_buffer, kwds)
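# --- Hedged usage sketch (not part of the original module) -------------------
# Shows the colspecs/widths equivalence documented in read_fwf above: the same
# two-column fixed-width layout expressed once as half-open character spans and
# once as contiguous field widths. The file name and layout are assumptions.
def _example_read_fwf_usage(path="fixed_width.txt"):
    import pandas as pd

    # Columns occupy character positions [0, 6) and [6, 12) on every line.
    via_colspecs = pd.read_fwf(path, colspecs=[(0, 6), (6, 12)])
    # Equivalent call when the fields are contiguous.
    via_widths = pd.read_fwf(path, widths=[6, 6])
    return via_colspecs, via_widths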
class TextFileReader(abc.Iterator):
"""
Passed dialect overrides any of the related parser options
"""
def __init__(self, f, engine=None, **kwds):
self.f = f
if engine is not None:
engine_specified = True
else:
engine = "python"
engine_specified = False
self.engine = engine
self._engine_specified = kwds.get("engine_specified", engine_specified)
_validate_skipfooter(kwds)
dialect = _extract_dialect(kwds)
if dialect is not None:
kwds = _merge_with_dialect_properties(dialect, kwds)
if kwds.get("header", "infer") == "infer":
kwds["header"] = 0 if kwds.get("names") is None else None
self.orig_options = kwds
# miscellanea
self._currow = 0
options = self._get_options_with_defaults(engine)
options["storage_options"] = kwds.get("storage_options", None)
self.chunksize = options.pop("chunksize", None)
self.nrows = options.pop("nrows", None)
self.squeeze = options.pop("squeeze", False)
self._check_file_or_buffer(f, engine)
self.options, self.engine = self._clean_options(options, engine)
if "has_index_names" in kwds:
self.options["has_index_names"] = kwds["has_index_names"]
self._engine = self._make_engine(self.engine)
def close(self):
self._engine.close()
def _get_options_with_defaults(self, engine):
kwds = self.orig_options
options = {}
default: object | None
for argname, default in parser_defaults.items():
value = kwds.get(argname, default)
# see gh-12935
if argname == "mangle_dupe_cols" and not value:
raise ValueError("Setting mangle_dupe_cols=False is not supported yet")
else:
options[argname] = value
for argname, default in _c_parser_defaults.items():
if argname in kwds:
value = kwds[argname]
if engine != "c" and value != default:
if "python" in engine and argname not in _python_unsupported:
pass
elif value == _deprecated_defaults.get(argname, default):
pass
else:
raise ValueError(
f"The {repr(argname)} option is not supported with the "
f"{repr(engine)} engine"
)
else:
value = _deprecated_defaults.get(argname, default)
options[argname] = value
if engine == "python-fwf":
for argname, default in _fwf_defaults.items():
options[argname] = kwds.get(argname, default)
return options
def _check_file_or_buffer(self, f, engine):
# see gh-16530
if is_file_like(f) and engine != "c" and not hasattr(f, "__next__"):
# The C engine doesn't need the file-like to have the "__next__"
# attribute. However, the Python engine explicitly calls
# "__next__(...)" when iterating through such an object, meaning it
# needs to have that attribute
raise ValueError(
"The 'python' engine cannot iterate through this file buffer."
)
def _clean_options(self, options, engine):
result = options.copy()
fallback_reason = None
# C engine not supported yet
if engine == "c":
if options["skipfooter"] > 0:
fallback_reason = "the 'c' engine does not support skipfooter"
engine = "python"
sep = options["delimiter"]
delim_whitespace = options["delim_whitespace"]
if sep is None and not delim_whitespace:
if engine == "c":
fallback_reason = (
"the 'c' engine does not support "
"sep=None with delim_whitespace=False"
)
engine = "python"
elif sep is not None and len(sep) > 1:
if engine == "c" and sep == r"\s+":
result["delim_whitespace"] = True
del result["delimiter"]
elif engine not in ("python", "python-fwf"):
# wait until regex engine integrated
fallback_reason = (
"the 'c' engine does not support "
"regex separators (separators > 1 char and "
r"different from '\s+' are interpreted as regex)"
)
engine = "python"
elif delim_whitespace:
if "python" in engine:
result["delimiter"] = r"\s+"
elif sep is not None:
encodeable = True
encoding = sys.getfilesystemencoding() or "utf-8"
try:
if len(sep.encode(encoding)) > 1:
encodeable = False
except UnicodeDecodeError:
encodeable = False
if not encodeable and engine not in ("python", "python-fwf"):
fallback_reason = (
f"the separator encoded in {encoding} "
"is > 1 char long, and the 'c' engine "
"does not support such separators"
)
engine = "python"
quotechar = options["quotechar"]
if quotechar is not None and isinstance(quotechar, (str, bytes)):
if (
len(quotechar) == 1
and ord(quotechar) > 127
and engine not in ("python", "python-fwf")
):
fallback_reason = (
"ord(quotechar) > 127, meaning the "
"quotechar is larger than one byte, "
"and the 'c' engine does not support such quotechars"
)
engine = "python"
if fallback_reason and self._engine_specified:
raise ValueError(fallback_reason)
if engine == "c":
for arg in _c_unsupported:
del result[arg]
if "python" in engine:
for arg in _python_unsupported:
if fallback_reason and result[arg] != _c_parser_defaults[arg]:
raise ValueError(
"Falling back to the 'python' engine because "
f"{fallback_reason}, but this causes {repr(arg)} to be "
"ignored as it is not supported by the 'python' engine."
)
del result[arg]
if fallback_reason:
warnings.warn(
(
"Falling back to the 'python' engine because "
f"{fallback_reason}; you can avoid this warning by specifying "
"engine='python'."
),
ParserWarning,
stacklevel=5,
)
index_col = options["index_col"]
names = options["names"]
converters = options["converters"]
na_values = options["na_values"]
skiprows = options["skiprows"]
validate_header_arg(options["header"])
for arg in _deprecated_args:
parser_default = _c_parser_defaults[arg]
depr_default = _deprecated_defaults[arg]
if result.get(arg, depr_default) != depr_default:
msg = (
f"The {arg} argument has been deprecated and will be "
"removed in a future version.\n\n"
)
warnings.warn(msg, FutureWarning, stacklevel=7)
else:
result[arg] = parser_default
if index_col is True:
raise ValueError("The value of index_col couldn't be 'True'")
        if is_index_col(index_col):
import pandas as pd
def get_toy_data_seqclassification():
train_data = {
"sentence1": [
'Amrozi accused his brother , whom he called " the witness " , of deliberately distorting his evidence .',
"Yucaipa owned Dominick 's before selling the chain to Safeway in 1998 for $ 2.5 billion .",
"They had published an advertisement on the Internet on June 10 , offering the cargo for sale , he added .",
"Around 0335 GMT , Tab shares were up 19 cents , or 4.4 % , at A $ 4.56 , having earlier set a record high of A $ 4.57 .",
],
"sentence2": [
'Referring to him as only " the witness " , Amrozi accused his brother of deliberately distorting his evidence .',
"Yucaipa bought Dominick 's in 1995 for $ 693 million and sold it to Safeway for $ 1.8 billion in 1998 .",
"On June 10 , the ship 's owners had published an advertisement on the Internet , offering the explosives for sale .",
"Tab shares jumped 20 cents , or 4.6 % , to set a record closing high at A $ 4.57 .",
],
"label": [1, 0, 1, 0],
"idx": [0, 1, 2, 3],
}
train_dataset = pd.DataFrame(train_data)
dev_data = {
"sentence1": [
"The stock rose $ 2.11 , or about 11 percent , to close Friday at $ 21.51 on the New York Stock Exchange .",
"Revenue in the first quarter of the year dropped 15 percent from the same period a year earlier .",
"The Nasdaq had a weekly gain of 17.27 , or 1.2 percent , closing at 1,520.15 on Friday .",
"The DVD-CCA then appealed to the state Supreme Court .",
],
"sentence2": [
"PG & E Corp. shares jumped $ 1.63 or 8 percent to $ 21.03 on the New York Stock Exchange on Friday .",
"With the scandal hanging over Stewart 's company , revenue the first quarter of the year dropped 15 percent from the same period a year earlier .",
"The tech-laced Nasdaq Composite .IXIC rallied 30.46 points , or 2.04 percent , to 1,520.15 .",
"The DVD CCA appealed that decision to the U.S. Supreme Court .",
],
"label": [1, 1, 0, 1],
"idx": [4, 5, 6, 7],
}
dev_dataset = pd.DataFrame(dev_data)
test_data = {
"sentence1": [
"That compared with $ 35.18 million , or 24 cents per share , in the year-ago period .",
"Shares of Genentech , a much larger company with several products on the market , rose more than 2 percent .",
"Legislation making it harder for consumers to erase their debts in bankruptcy court won overwhelming House approval in March .",
"The Nasdaq composite index increased 10.73 , or 0.7 percent , to 1,514.77 .",
],
"sentence2": [
"Earnings were affected by a non-recurring $ 8 million tax benefit in the year-ago period .",
"Shares of Xoma fell 16 percent in early trade , while shares of Genentech , a much larger company with several products on the market , were up 2 percent .",
"Legislation making it harder for consumers to erase their debts in bankruptcy court won speedy , House approval in March and was endorsed by the White House .",
"The Nasdaq Composite index , full of technology stocks , was lately up around 18 points .",
],
"label": [0, 0, 0, 0],
"idx": [8, 10, 11, 12],
}
test_dataset = pd.DataFrame(test_data)
custom_sent_keys = ["sentence1", "sentence2"]
label_key = "label"
X_train = train_dataset[custom_sent_keys]
y_train = train_dataset[label_key]
X_val = dev_dataset[custom_sent_keys]
y_val = dev_dataset[label_key]
X_test = test_dataset[custom_sent_keys]
return X_train, y_train, X_val, y_val, X_test
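# Hedged usage sketch (illustration only): documents the shapes and column
# layout returned by get_toy_data_seqclassification(). No particular training
# library is assumed here.
def _example_seqclassification_split_shapes():
    X_train, y_train, X_val, y_val, X_test = get_toy_data_seqclassification()
    assert list(X_train.columns) == ["sentence1", "sentence2"]
    assert len(X_train) == len(y_train) == 4
    assert len(X_val) == len(y_val) == 4
    # X_test carries no labels; it mimics an unlabeled prediction set.
    return X_train.shape, X_val.shape, X_test.shape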
def get_toy_data_multiclassclassification():
train_data = {
"text": [
"i didnt feel humiliated",
"i can go from feeling so hopeless to so damned hopeful just from being around someone who cares and is awake",
"im grabbing a minute to post i feel greedy wrong",
"i am ever feeling nostalgic about the fireplace i will know that it is still on the property",
"i am feeling grouchy",
"ive been feeling a little burdened lately wasnt sure why that was",
"ive been taking or milligrams or times recommended amount and ive fallen asleep a lot faster but i also feel like so funny",
"i feel as confused about life as a teenager or as jaded as a year old man",
"i have been with petronas for years i feel that petronas has performed well and made a huge profit",
"i feel romantic too",
"i feel like i have to make the suffering i m seeing mean something",
"i do feel that running is a divine experience and that i can expect to have some type of spiritual encounter",
],
"label": [0, 0, 3, 2, 3, 0, 5, 4, 1, 2, 0, 1],
}
train_dataset = pd.DataFrame(train_data)
dev_data = {
"text": [
"i think it s the easiest time of year to feel dissatisfied",
"i feel low energy i m just thirsty",
"i have immense sympathy with the general point but as a possible proto writer trying to find time to write in the corners of life and with no sign of an agent let alone a publishing contract this feels a little precious",
"i do not feel reassured anxiety is on each side",
],
"label": [3, 0, 1, 1],
}
dev_dataset = pd.DataFrame(dev_data)
custom_sent_keys = ["text"]
label_key = "label"
X_train = train_dataset[custom_sent_keys]
y_train = train_dataset[label_key]
X_val = dev_dataset[custom_sent_keys]
y_val = dev_dataset[label_key]
return X_train, y_train, X_val, y_val
def get_toy_data_multiplechoiceclassification():
train_data = {
"video-id": [
"anetv_fruimvo90vA",
"anetv_fruimvo90vA",
"anetv_fruimvo90vA",
"anetv_MldEr60j33M",
"lsmdc0049_Hannah_and_her_sisters-69438",
],
"fold-ind": ["10030", "10030", "10030", "5488", "17405"],
"startphrase": [
"A woman is seen running down a long track and jumping into a pit. The camera",
"A woman is seen running down a long track and jumping into a pit. The camera",
"A woman is seen running down a long track and jumping into a pit. The camera",
"A man in a white shirt bends over and picks up a large weight. He",
"Someone furiously shakes someone away. He",
],
"sent1": [
"A woman is seen running down a long track and jumping into a pit.",
"A woman is seen running down a long track and jumping into a pit.",
"A woman is seen running down a long track and jumping into a pit.",
"A man in a white shirt bends over and picks up a large weight.",
"Someone furiously shakes someone away.",
],
"sent2": ["The camera", "The camera", "The camera", "He", "He"],
"gold-source": ["gen", "gen", "gold", "gen", "gold"],
"ending0": [
"captures her as well as lifting weights down in place.",
"follows her spinning her body around and ends by walking down a lane.",
"watches her as she walks away and sticks her tongue out to another person.",
"lifts the weights over his head.",
"runs to a woman standing waiting.",
],
"ending1": [
"pans up to show another woman running down the track.",
"pans around the two.",
"captures her as well as lifting weights down in place.",
"also lifts it onto his chest before hanging it back out again.",
"tackles him into the passenger seat.",
],
"ending2": [
"follows her movements as the group members follow her instructions.",
"captures her as well as lifting weights down in place.",
"follows her spinning her body around and ends by walking down a lane.",
"spins around and lifts a barbell onto the floor.",
"pounds his fist against a cupboard.",
],
"ending3": [
"follows her spinning her body around and ends by walking down a lane.",
"follows her movements as the group members follow her instructions.",
"pans around the two.",
"bends down and lifts the weight over his head.",
"offers someone the cup on his elbow and strides out.",
],
"label": [1, 3, 0, 0, 2],
}
dev_data = {
"video-id": [
"lsmdc3001_21_JUMP_STREET-422",
"lsmdc0001_American_Beauty-45991",
"lsmdc0001_American_Beauty-45991",
"lsmdc0001_American_Beauty-45991",
],
"fold-ind": ["11783", "10977", "10970", "10968"],
"startphrase": [
"Firing wildly he shoots holes through the tanker. He",
"He puts his spatula down. The Mercedes",
"He stands and looks around, his eyes finally landing on: "
"The digicam and a stack of cassettes on a shelf. Someone",
"He starts going through someone's bureau. He opens the drawer "
"in which we know someone keeps his marijuana, but he",
],
"sent1": [
"Firing wildly he shoots holes through the tanker.",
"He puts his spatula down.",
"He stands and looks around, his eyes finally landing on: "
"The digicam and a stack of cassettes on a shelf.",
"He starts going through someone's bureau.",
],
"sent2": [
"He",
"<NAME>",
"Someone",
"He opens the drawer in which we know someone keeps his marijuana, but he",
],
"gold-source": ["gold", "gold", "gold", "gold"],
"ending0": [
"overtakes the rig and falls off his bike.",
"fly open and drinks.",
"looks at someone's papers.",
"stops one down and rubs a piece of the gift out.",
],
"ending1": [
"squeezes relentlessly on the peanut jelly as well.",
"walks off followed driveway again.",
"feels around it and falls in the seat once more.",
"cuts the mangled parts.",
],
"ending2": [
"scrambles behind himself and comes in other directions.",
"slots them into a separate green.",
"sprints back from the wreck and drops onto his back.",
"hides it under his hat to watch.",
],
"ending3": [
"sweeps a explodes and knocks someone off.",
"pulls around to the drive - thru window.",
"sits at the kitchen table, staring off into space.",
"does n't discover its false bottom.",
],
"label": [0, 3, 3, 3],
}
test_data = {
"video-id": [
"lsmdc0001_American_Beauty-45991",
"lsmdc0001_American_Beauty-45991",
"lsmdc0001_American_Beauty-45991",
"lsmdc0001_American_Beauty-45991",
],
"fold-ind": ["10980", "10976", "10978", "10969"],
"startphrase": [
"Someone leans out of the drive - thru window, "
"grinning at her, holding bags filled with fast food. The Counter Girl",
"Someone looks up suddenly when he hears. He",
"Someone drives; someone sits beside her. They",
"He opens the drawer in which we know someone "
"keeps his marijuana, but he does n't discover"
" its false bottom. He stands and looks around, his eyes",
],
"sent1": [
"Someone leans out of the drive - thru "
"window, grinning at her, holding bags filled with fast food.",
"Someone looks up suddenly when he hears.",
"Someone drives; someone sits beside her.",
"He opens the drawer in which we know"
" someone keeps his marijuana, but he does n't discover its false bottom.",
],
"sent2": [
"The Counter Girl",
"He",
"They",
"He stands and looks around, his eyes",
],
"gold-source": ["gold", "gold", "gold", "gold"],
"ending0": [
"stands next to him, staring blankly.",
"puts his spatula down.",
"rise someone's feet up.",
"moving to the side, the houses rapidly stained.",
],
"ending1": [
"with auditorium, filmed, singers the club.",
"bumps into a revolver and drops surreptitiously into his weapon.",
"lift her and they are alarmed.",
"focused as the sight of someone making his way down a trail.",
],
"ending2": [
"attempts to block her ransacked.",
"talks using the phone and walks away for a few seconds.",
"are too involved with each other to "
"notice someone watching them from the drive - thru window.",
"finally landing on: the digicam and a stack of cassettes on a shelf.",
],
"ending3": [
"is eating solid and stinky.",
"bundles the flaxen powder beneath the car.",
"sit at a table with a beer from a table.",
"deep and continuing, its bleed - length sideburns pressing on him.",
],
"label": [0, 0, 2, 2],
}
train_dataset = pd.DataFrame(train_data)
dev_dataset = pd.DataFrame(dev_data)
test_dataset = pd.DataFrame(test_data)
custom_sent_keys = [
"sent1",
"sent2",
"ending0",
"ending1",
"ending2",
"ending3",
"gold-source",
"video-id",
"startphrase",
"fold-ind",
]
label_key = "label"
X_train = train_dataset[custom_sent_keys]
y_train = train_dataset[label_key]
X_val = dev_dataset[custom_sent_keys]
y_val = dev_dataset[label_key]
X_test = test_dataset[custom_sent_keys]
y_test = test_dataset[label_key]
return X_train, y_train, X_val, y_val, X_test, y_test
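# Hedged usage sketch (illustration only): each row of the multiple-choice
# split pairs a context ("sent1" + "sent2") with four candidate endings, and
# the label is the index (0-3) of the correct ending.
def _example_multiplechoice_row_structure():
    X_train, y_train, _, _, _, _ = get_toy_data_multiplechoiceclassification()
    first_row = X_train.iloc[0]
    endings = [first_row[f"ending{k}"] for k in range(4)]
    # label 1 selects "pans up to show another woman running down the track."
    correct_ending = endings[y_train.iloc[0]]
    return first_row["sent1"], first_row["sent2"], correct_ending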
def get_toy_data_seqregression():
train_data = {
"sentence1": [
"A plane is taking off.",
"A man is playing a large flute.",
"A man is spreading shreded cheese on a pizza.",
"Three men are playing chess.",
],
"sentence2": [
"An air plane is taking off.",
"A man is playing a flute.",
"A man is spreading shredded cheese on an uncooked pizza.",
"Two men are playing chess.",
],
"label": [5.0, 3.799999952316284, 3.799999952316284, 2.5999999046325684],
"idx": [0, 1, 2, 3],
}
train_dataset = pd.DataFrame(train_data)
dev_data = {
"sentence1": [
"A man is playing the cello.",
"Some men are fighting.",
"A man is smoking.",
"The man is playing the piano.",
],
"sentence2": [
"A man seated is playing the cello.",
"Two men are fighting.",
"A man is skating.",
"The man is playing the guitar.",
],
"label": [4.25, 4.25, 0.5, 1.600000023841858],
"idx": [4, 5, 6, 7],
}
dev_dataset = pd.DataFrame(dev_data)
custom_sent_keys = ["sentence1", "sentence2"]
label_key = "label"
X_train = train_dataset[custom_sent_keys]
y_train = train_dataset[label_key]
X_val = dev_dataset[custom_sent_keys]
y_val = dev_dataset[label_key]
return X_train, y_train, X_val, y_val
def get_toy_data_summarization():
train_dataset = pd.DataFrame(
[
("The cat is alive", "The cat is dead"),
("The cat is alive", "The cat is dead"),
("The cat is alive", "The cat is dead"),
("The cat is alive", "The cat is dead"),
]
)
dev_dataset = pd.DataFrame(
[
("The old woman is beautiful", "The old woman is ugly"),
("The old woman is beautiful", "The old woman is ugly"),
("The old woman is beautiful", "The old woman is ugly"),
("The old woman is beautiful", "The old woman is ugly"),
]
)
test_dataset = pd.DataFrame(
[
("The purse is cheap", "The purse is expensive"),
("The purse is cheap", "The purse is expensive"),
("The purse is cheap", "The purse is expensive"),
("The purse is cheap", "The purse is expensive"),
]
)
for each_dataset in [train_dataset, dev_dataset, test_dataset]:
each_dataset.columns = ["document", "summary"]
custom_sent_keys = ["document"]
label_key = "summary"
X_train = train_dataset[custom_sent_keys]
y_train = train_dataset[label_key]
X_val = dev_dataset[custom_sent_keys]
y_val = dev_dataset[label_key]
X_test = test_dataset[custom_sent_keys]
return X_train, y_train, X_val, y_val, X_test
def get_toy_data_tokenclassification():
train_data = {
"chunk_tags": [
[11, 21, 11, 12, 21, 22, 11, 12, 0],
[11, 12],
[11, 12],
[
11,
12,
12,
21,
13,
11,
11,
21,
13,
11,
12,
13,
11,
21,
22,
11,
12,
17,
11,
21,
17,
11,
12,
12,
21,
22,
22,
13,
11,
0,
],
],
"id": ["0", "1", "2", "3"],
"ner_tags": [
[3, 0, 7, 0, 0, 0, 7, 0, 0],
[1, 2],
[5, 0],
[
0,
3,
4,
0,
0,
0,
0,
0,
0,
7,
0,
0,
0,
0,
0,
7,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
],
"pos_tags": [
[22, 42, 16, 21, 35, 37, 16, 21, 7],
[22, 22],
[22, 11],
[
12,
22,
22,
38,
15,
22,
28,
38,
15,
16,
21,
35,
24,
35,
37,
16,
21,
15,
24,
41,
15,
16,
21,
21,
20,
37,
40,
35,
21,
7,
],
],
"tokens": [
[
"EU",
"rejects",
"German",
"call",
"to",
"boycott",
"British",
"lamb",
".",
],
["Peter", "Blackburn"],
["BRUSSELS", "1996-08-22"],
[
"The",
"European",
"Commission",
"said",
"on",
"Thursday",
"it",
"disagreed",
"with",
"German",
"advice",
"to",
"consumers",
"to",
"shun",
"British",
"lamb",
"until",
"scientists",
"determine",
"whether",
"mad",
"cow",
"disease",
"can",
"be",
"transmitted",
"to",
"sheep",
".",
],
],
}
dev_data = {
"chunk_tags": [
[
11,
11,
12,
13,
11,
12,
12,
11,
12,
12,
12,
12,
21,
13,
11,
12,
21,
22,
11,
13,
11,
1,
13,
11,
17,
11,
12,
12,
21,
1,
0,
],
[
0,
11,
21,
22,
22,
11,
12,
12,
17,
11,
21,
22,
22,
11,
12,
13,
11,
0,
0,
11,
12,
11,
12,
12,
12,
12,
12,
12,
21,
11,
12,
12,
0,
],
[
11,
21,
11,
12,
12,
21,
22,
0,
17,
11,
21,
22,
17,
11,
21,
22,
11,
21,
22,
22,
13,
11,
12,
12,
0,
],
[
11,
21,
11,
12,
11,
12,
13,
11,
12,
12,
12,
12,
21,
22,
11,
12,
0,
11,
0,
11,
12,
13,
11,
12,
12,
12,
12,
12,
21,
11,
12,
1,
2,
2,
11,
21,
22,
11,
12,
0,
],
],
"id": ["4", "5", "6", "7"],
"ner_tags": [
[
5,
0,
0,
0,
0,
3,
4,
0,
0,
0,
1,
2,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
5,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
3,
0,
0,
0,
1,
2,
2,
2,
0,
0,
0,
0,
0,
],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 4, 0],
[
0,
0,
0,
0,
0,
0,
0,
3,
0,
0,
1,
2,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
],
"pos_tags": [
[
22,
27,
21,
35,
12,
22,
22,
27,
16,
21,
22,
22,
38,
15,
22,
24,
20,
37,
21,
15,
24,
16,
15,
22,
15,
12,
16,
21,
38,
17,
7,
],
[
0,
28,
41,
30,
37,
12,
16,
21,
15,
28,
41,
30,
37,
12,
24,
15,
28,
6,
0,
12,
22,
27,
16,
21,
22,
22,
14,
22,
38,
12,
21,
21,
7,
],
[
28,
38,
16,
16,
21,
38,
40,
10,
15,
28,
38,
40,
15,
21,
38,
40,
28,
20,
37,
40,
15,
12,
22,
22,
7,
],
[
28,
38,
12,
21,
16,
21,
15,
22,
22,
22,
22,
22,
35,
37,
21,
24,
6,
24,
10,
16,
24,
15,
12,
21,
10,
21,
21,
24,
38,
12,
30,
16,
10,
16,
21,
35,
37,
16,
21,
7,
],
],
"tokens": [
[
"Germany",
"'s",
"representative",
"to",
"the",
"European",
"Union",
"'s",
"veterinary",
"committee",
"Werner",
"Zwingmann",
"said",
"on",
"Wednesday",
"consumers",
"should",
"buy",
"sheepmeat",
"from",
"countries",
"other",
"than",
"Britain",
"until",
"the",
"scientific",
"advice",
"was",
"clearer",
".",
],
[
'"',
"We",
"do",
"n't",
"support",
"any",
"such",
"recommendation",
"because",
"we",
"do",
"n't",
"see",
"any",
"grounds",
"for",
"it",
",",
'"',
"the",
"Commission",
"'s",
"chief",
"spokesman",
"Nikolaus",
"van",
"der",
"Pas",
"told",
"a",
"news",
"briefing",
".",
],
[
"He",
"said",
"further",
"scientific",
"study",
"was",
"required",
"and",
"if",
"it",
"was",
"found",
"that",
"action",
"was",
"needed",
"it",
"should",
"be",
"taken",
"by",
"the",
"European",
"Union",
".",
],
[
"He",
"said",
"a",
"proposal",
"last",
"month",
"by",
"EU",
"Farm",
"Commissioner",
"Franz",
"Fischler",
"to",
"ban",
"sheep",
"brains",
",",
"spleens",
"and",
"spinal",
"cords",
"from",
"the",
"human",
"and",
"animal",
"food",
"chains",
"was",
"a",
"highly",
"specific",
"and",
"precautionary",
"move",
"to",
"protect",
"human",
"health",
".",
],
],
}
train_dataset = pd.DataFrame(train_data)
    dev_dataset = pd.DataFrame(dev_data)
"""
Detection Recipe - 192.168.3.11
References:
(1) 'Asteroseismic detection predictions: TESS' by Chaplin (2015)
(2) 'On the use of empirical bolometric corrections for stars' by Torres (2010)
(3) 'The amplitude of solar oscillations using stellar techniques' by Kjeldson (2008)
(4) 'An absolutely calibrated Teff scale from the infrared flux method'
by Casagrande (2010) table 4
(5) 'Characterization of the power excess of solar-like oscillations in red giants with Kepler'
by Mosser (2011)
(6) 'Predicting the detectability of oscillations in solar-type stars observed by Kepler'
by Chaplin (2011)
(7) 'The connection between stellar granulation and oscillation as seen by the Kepler mission'
by Kallinger et al (2014)
(8) 'The Transiting Exoplanet Survey Satellite: Simulations of Planet Detections and
Astrophysical False Positives' by Sullivan et al. (2015)
(9) Astropysics module at https://pythonhosted.org/Astropysics/coremods/coords.html
(10) <NAME>'s calc_noise IDL procedure for TESS.
(11) <NAME>lin's soldet6 IDL procedure to calculate the probability of detecting
oscillations with Kepler.
(12) Coordinate conversion at https://ned.ipac.caltech.edu/forms/calculator.html
(13) Bedding 1996
(14) 'The Asteroseismic potential of TESS' by Campante et al. 2016
"""
import numpy as np
from itertools import groupby
from operator import itemgetter
import sys
import pandas as pd
from scipy import stats
import warnings
warnings.simplefilter("ignore")
def bv2teff(b_v):
# from Torres 2010 table 2. Applies to MS, SGB and giant stars
# B-V limits from Flower 1996 fig 5
a = 3.979145106714099
b = -0.654992268598245
c = 1.740690042385095
d = -4.608815154057166
e = 6.792599779944473
f = -5.396909891322525
g = 2.192970376522490
h = -0.359495739295671
lteff = a + b*b_v + c*(b_v**2) + d*(b_v**3) + e*(b_v**4) + f*(b_v**5) + g*(b_v**6) + h*(b_v**7)
teff = 10.0**lteff
return teff
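# Hedged check (illustration only): the Sun's B-V colour is roughly 0.65, which
# the Torres (2010) polynomial above maps to ~5700 K, close to the solar Teff.
def _example_bv2teff_sun():
    return bv2teff(0.65)  # expect a value near 5700-5800 K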
# from <NAME> 2003. BCv values from Flower 1996 polynomials presented in Torres 2010
# Av is a keyword argument. If reddening values are not available, ignore its effect
def Teff2bc2lum(teff, parallax, parallax_err, vmag, Av=0):
lteff = np.log10(teff)
BCv = np.full(len(lteff), -100.5)
BCv[lteff<3.70] = (-0.190537291496456*10.0**5) + \
(0.155144866764412*10.0**5*lteff[lteff<3.70]) + \
(-0.421278819301717*10.0**4.0*lteff[lteff<3.70]**2.0) + \
(0.381476328422343*10.0**3*lteff[lteff<3.70]**3.0)
BCv[(3.70<lteff) & (lteff<3.90)] = (-0.370510203809015*10.0**5) + \
(0.385672629965804*10.0**5*lteff[(3.70<lteff) & (lteff<3.90)]) + \
(-0.150651486316025*10.0**5*lteff[(3.70<lteff) & (lteff<3.90)]**2.0) + \
(0.261724637119416*10.0**4*lteff[(3.70<lteff) & (lteff<3.90)]**3.0) + \
(-0.170623810323864*10.0**3*lteff[(3.70<lteff) & (lteff<3.90)]**4.0)
BCv[lteff>3.90] = (-0.118115450538963*10.0**6) + \
(0.137145973583929*10.0**6*lteff[lteff > 3.90]) + \
(-0.636233812100225*10.0**5*lteff[lteff > 3.90]**2.0) + \
(0.147412923562646*10.0**5*lteff[lteff > 3.90]**3.0) + \
(-0.170587278406872*10.0**4*lteff[lteff > 3.90]**4.0) + \
(0.788731721804990*10.0**2*lteff[lteff > 3.90]**5.0)
u = 4.0 + 0.4 * 4.73 - 2.0 * np.log10(parallax) - 0.4 * (vmag - Av + BCv)
lum = 10**u # in solar units
e_lum = (2.0 / parallax * 10**u)**2 * parallax_err**2
e_lum = np.sqrt(e_lum)
return lum, e_lum
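# Hedged check (illustration only): a Sun-like star (Teff = 5777 K, V = 4.83)
# placed at 10 pc, i.e. a 100 mas parallax (the distance-modulus constant in u
# above is consistent with parallaxes given in milliarcseconds), should come
# out with a luminosity close to 1 L_sun.
def _example_teff2bc2lum_sunlike():
    lum, e_lum = Teff2bc2lum(np.array([5777.0]), np.array([100.0]),
                             np.array([0.1]), np.array([4.83]))
    return lum, e_lum  # expect lum within roughly 10% of 1 solar luminosity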
# calculate seismic parameters
def seismicParameters(teff, lum):
# solar parameters
teff_solar = 5777.0 # Kelvin
teffred_solar = 8907.0 #in Kelvin
numax_solar = 3090.0 # in micro Hz
dnu_solar = 135.1 # in micro Hz
cadence = 120 # in s
vnyq = (1.0 / (2.0*cadence)) * 10**6 # in micro Hz
teffred = teffred_solar*(lum**-0.093) # from (6) eqn 8. red-edge temp
    rad = lum**0.5 * ((teff/teff_solar)**-2) # Stefan-Boltzmann law
numax = numax_solar*(rad**-1.85)*((teff/teff_solar)**0.92) # from (14)
return cadence, vnyq, rad, numax, teffred, teff_solar, teffred_solar, numax_solar, dnu_solar
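# Hedged check (illustration only): with solar Teff and luminosity the scaling
# relations above return the solar values by construction (rad = 1 R_sun,
# numax = 3090 microHz), and the 120 s cadence gives a Nyquist frequency of
# ~4167 microHz.
def _example_seismic_parameters_sun():
    cadence, vnyq, rad, numax, teffred, *_ = seismicParameters(teff=5777.0, lum=1.0)
    return rad, numax, vnyq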
# no coordinate conversion before calculating tess field observing time. Only
# works with ecliptic coordinates
def tess_field_only(e_lng, e_lat):
# create a list to append all of the total observing times 'T' in the TESS field to
T = [] # units of sectors (0-13)
# create a list to append all of the maximum contiguous observations to
max_T = [] # units of sectors (0-13)
for star in range(len(e_lng)):
# 'n' defines the distance between each equidistant viewing sector in the TESS field.
n = 360.0/13
# Define a variable to count the total number of sectors a star is observed in.
counter = 0
# Define a variable to count all of the observations for each star.
# Put each observation sector into sca separately in order to find the largest number
# of contiguous observations for each star.
sca = []
# 'ranges' stores all of the contiguous observations for each star.
ranges = []
# Defines the longitude range of the observing sectors at the inputted stellar latitude
lngrange = 24.0/abs(np.cos(np.radians(e_lat[star])))
if lngrange>=360.0:
lngrange=360.0
# if the star is in the northern hemisphere:
if e_lat[star] >= 0.0:
# For each viewing sector.
for i in range(1,14):
# Define an ra position for the centre of each sector in increasing longitude.
# if a hemisphere has an overshoot, replace 0.0 with the value.
a = 0.0+(n*(i-1))
# calculate the distances both ways around the
# circle between the star and the centre of the sector.
# The smallest distance is the one that should be used
# to see if the star lies in the observing sector.
d1 = abs(e_lng[star]-a)
d2 = (360.0 - abs(e_lng[star]-a))
if d1>d2:
d1 = d2
# if the star is in the 'overshoot' region for some sectors, calculate d3 and d4;
# the distances both ways around the circle bwtween the star and the centre of the
# 'overshooting past the pole' region of the sector.
# The smallest distance is the one that should be used
# to see if the star lies in the observing sector.
# the shortest distances between the centre of the sector and star, and the sector's
# overshoot and the star should add to 180.0 apart (i.e d1+d3=180.0)
d3 = abs(e_lng[star] - (a+180.0)%360.0)
d4 = 360.0 - abs(e_lng[star] - (a+180.0)%360.0)
if d3>d4:
d3 = d4
# check if a star lies in the field of that sector.
if (d1<=lngrange/2.0 and 6.0<=e_lat[star]) or (d3<=lngrange/2.0 and 78.0<=e_lat[star]):
counter += 1
sca = np.append(sca, i)
else:
pass
# if the star is in the southern hemisphere:
if e_lat[star] < 0.0:
# For each viewing sector.
for i in range(1,14):
                # Define an ecliptic longitude position for the centre of each sector in increasing longitude.
# if a hemisphere has an overshoot, replace 0.0 with the value.
a = 0.0+(n*(i-1))
# calculate the distances both ways around the
# circle between the star and the centre of the sector.
# The smallest distance is the one that should be used
# to see if the star lies in the observing sector.
d1 = abs(e_lng[star]-a)
d2 = (360 - abs(e_lng[star]-a))
if d1>d2:
d1 = d2
# if the star is in the 'overshoot' region for some sectors, calculate d3 and d4;
# the distances both ways around the circle between the star and the centre of the
# 'overshooting past the pole' region of the sector.
# The smallest distance of the 2 is the one that should be used
# to see if the star lies in the observing sector.
d3 = abs(e_lng[star] - (a+180.0)%360.0)
d4 = (360 - abs(e_lng[star] - (a+180.0)%360.0))
if d3>d4:
d3 = d4
# check if a star lies in the field of that sector.
if (d1<=lngrange/2.0 and -6.0>=e_lat[star]) or (d3<=lngrange/2.0 and -78.0>=e_lat[star]):
counter += 1
sca = np.append(sca, i)
else:
pass
if len(sca) == 0:
ranges = [0]
else:
            for k,g in groupby(enumerate(sca), lambda i_x:i_x[0]-i_x[1]):
                # materialise the iterator: a bare map object would be exhausted
                # after its first use under Python 3
                group = list(map(itemgetter(1), g))
                if np.array(group).sum() != 0:
                    ranges.append([len(group)])
T=np.append(T, counter)
max_T = np.append(max_T, np.max(np.array(ranges)))
return T, max_T
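# Hedged check (illustration only): a target near an ecliptic pole sits in the
# continuous viewing zone and should be picked up in all 13 sectors, while a
# low-latitude target is typically covered by a single sector. The coordinates
# are made up for the example.
def _example_tess_field_only():
    T, max_T = tess_field_only(np.array([0.0, 10.0]), np.array([85.0, 10.0]))
    return T, max_T  # expect approximately [13, 1] for both arrays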
def calc_noise(imag, exptime, teff, e_lng = 0, e_lat = 30, g_lng = 96, g_lat = -30, subexptime = 2.0, npix_aper = 10, \
frac_aper = 0.76, e_pix_ro = 10, geom_area = 60.0, pix_scale = 21.1, sys_limit = 0):
omega_pix = pix_scale**2.0
n_exposures = exptime/subexptime
# electrons from the star
megaph_s_cm2_0mag = 1.6301336 + 0.14733937*(teff-5000.0)/5000.0
e_star = 10.0**(-0.4*imag) * 10.0**6 * megaph_s_cm2_0mag * geom_area * exptime * frac_aper
e_star_sub = e_star*subexptime/exptime
# e/pix from zodi
dlat = (abs(e_lat)-90.0)/90.0
vmag_zodi = 23.345 - (1.148*dlat**2.0)
e_pix_zodi = 10.0**(-0.4*(vmag_zodi-22.8)) * (2.39*10.0**-3) * geom_area * omega_pix * exptime
# e/pix from background stars
dlat = abs(g_lat)/40.0*10.0**0
dlon = g_lng
q = np.where(dlon>180.0)
if len(q[0])>0:
dlon[q] = 360.0-dlon[q]
dlon = abs(dlon)/180.0*10.0**0
p = [18.97338*10.0**0, 8.833*10.0**0, 4.007*10.0**0, 0.805*10.0**0]
imag_bgstars = p[0] + p[1]*dlat + p[2]*dlon**(p[3])
e_pix_bgstars = 10.0**(-0.4*imag_bgstars) * 1.7*10.0**6 * geom_area * omega_pix * exptime
# compute noise sources
noise_star = np.sqrt(e_star) / e_star
noise_sky = np.sqrt(npix_aper*(e_pix_zodi + e_pix_bgstars)) / e_star
noise_ro = np.sqrt(npix_aper*n_exposures)*e_pix_ro / e_star
noise_sys = 0.0*noise_star + sys_limit/(1*10.0**6)/np.sqrt(exptime/3600.0)
noise1 = np.sqrt(noise_star**2.0 + noise_sky**2.0 + noise_ro**2.0)
noise2 = np.sqrt(noise_star**2.0 + noise_sky**2.0 + noise_ro**2.0 + noise_sys**2.0)
return noise2
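# Hedged check (illustration only): total fractional noise for a bright
# (I = 6) target observed with 120 s exposures at moderate ecliptic latitude;
# the coordinates and aperture size are made up for the example.
def _example_calc_noise():
    noise = calc_noise(imag=6.0, exptime=120.0, teff=5777.0,
                       e_lng=30.0, e_lat=40.0, g_lng=96.0, g_lat=-30.0,
                       npix_aper=4, sys_limit=0)
    return noise  # fractional rms noise; multiply by 1e6 for ppm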
# calculate the granulation at a set of frequencies from (7) eqn 2 model F
def granulation(nu0, dilution, a_nomass, b1, b2, vnyq):
# Divide by dilution squared as it affects stars in the time series.
# The units of dilution change from ppm to ppm^2 microHz^-1 when going from the
# time series to frequency. p6: c=4 and zeta = 2*sqrt(2)/pi
Pgran = (((2*np.sqrt(2))/np.pi) * (a_nomass**2/b1) / (1 + ((nu0/b1)**4)) \
+ ((2*np.sqrt(2))/np.pi) * (a_nomass**2/b2) / (1 + ((nu0/b2)**4))) / (dilution**2)
# From (9). the amplitude suppression factor. Normalised sinc with pi (area=1)
eta = np.sinc((nu0/(2*vnyq)))
# the granulation after attenuation
Pgran = Pgran * eta**2
return Pgran, eta
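# Hedged check (illustration only): evaluates the granulation background at
# numax for a solar-like star (numax ~3090 microHz, 120 s cadence) with no
# dilution. The amplitude/timescale inputs reuse the same scalings applied
# later in globalDetections().
def _example_granulation_at_numax():
    numax = 3090.0
    a_nomass = 0.85 * 3382 * numax**-0.609
    b1 = 0.317 * numax**0.970
    b2 = 0.948 * numax**0.992
    vnyq = (1.0 / (2.0 * 120)) * 10**6
    Pgran, eta = granulation(numax, 1.0, a_nomass, b1, b2, vnyq)
    return Pgran, eta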
# the total number of pixels used by the highest ranked x number of targets in the tCTL
def pixel_cost(x):
N = np.ceil(10.0**-5.0 * 10.0**(0.4*(20.0-x)))
N_tot = 10*(N+10)
total = np.cumsum(N_tot)
# want to find: the number of ranked tCTL stars (from highest to lowest rank) that correspond to a pixel cost of 1.4Mpix at a given time
per_cam = 26*4 # to get from the total pixel cost to the cost per camera at a given time, divide by this
pix_limit = 1.4e6 # the pixel limit per camera at a given time
return total[-1], per_cam, pix_limit, N_tot
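# Hedged check (illustration only): per-target pixel cost for a few TESS
# magnitudes; brighter stars need larger photometric apertures and therefore
# more pixels, so npix_aper should be non-increasing with magnitude.
def _example_pixel_cost():
    total, per_cam, pix_limit, npix_aper = pixel_cost(np.array([6.0, 9.0, 12.0]))
    return total, npix_aper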
# detection recipe to find whether a star has an observed solar-like Gaussian mode power excess
def globalDetections(g_lng, g_lat, e_lng, e_lat, imag, \
lum, rad, teff, numax, max_T, teffred, teff_solar, \
teffred_solar, numax_solar, dnu_solar, sys_limit, dilution, vnyq, cadence, vary_beta=False):
dnu = dnu_solar*(rad**-1.42)*((teff/teff_solar)**0.71) # from (14) eqn 21
beta = 1.0-np.exp(-(teffred-teff)/1550.0) # beta correction for hot solar-like stars from (6) eqn 9.
if isinstance(teff, float): # for only 1 star
if (teff>=teffred):
beta = 0.0
else:
beta[teff>=teffred] = 0.0
# to remove the beta correction, set Beta=1
if vary_beta == False:
beta = 1.0
# modified from (6) eqn 11. Now consistent with dnu proportional to numax^0.77 in (14)
amp = 0.85*2.5*beta*(rad**1.85)*((teff/teff_solar)**0.57)
# From (5) table 2 values for delta nu_{env}. env_width is defined as +/- some value.
env_width = 0.66 * numax**0.88
env_width[numax>100.] = numax[numax>100.]/2. # from (6) p12
total, per_cam, pix_limit, npix_aper = pixel_cost(imag)
noise = calc_noise(imag=imag, teff=teff, exptime=cadence, e_lng=e_lng, e_lat=e_lat, \
g_lng=g_lng, g_lat=g_lat, sys_limit=sys_limit, npix_aper=npix_aper)
noise = noise*10.0**6 # total noise in units of ppm
a_nomass = 0.85 * 3382*numax**-0.609 # multiply by 0.85 to convert to redder TESS bandpass.
b1 = 0.317 * numax**0.970
b2 = 0.948 * numax**0.992
# call the function for the real and aliased components (above and below vnyq) of the granulation
    # the order of the stars is different for the aliases so run the function in a loop
Pgran, eta = granulation(numax, dilution, a_nomass, b1, b2, vnyq)
Pgranalias = np.zeros(len(Pgran))
etaalias = np.zeros(len(eta))
# if vnyq is 1 fixed value
if isinstance(vnyq, float):
for i in range(len(numax)):
if numax[i] > vnyq:
Pgranalias[i], etaalias[i] = granulation((vnyq - (numax[i] - vnyq)), \
dilution, a_nomass[i], b1[i], b2[i], vnyq)
elif numax[i] < vnyq:
Pgranalias[i], etaalias[i] = granulation((vnyq + (vnyq - numax[i])), \
dilution, a_nomass[i], b1[i], b2[i], vnyq)
# if vnyq varies for each star
else:
for i in range(len(numax)):
if numax[i] > vnyq[i]:
Pgranalias[i], etaalias[i] = granulation((vnyq[i] - (numax[i] - vnyq[i])), \
dilution, a_nomass[i], b1[i], b2[i], vnyq[i])
elif numax[i] < vnyq[i]:
Pgranalias[i], etaalias[i] = granulation((vnyq[i] + (vnyq[i] - numax[i])), \
dilution, a_nomass[i], b1[i], b2[i], vnyq[i])
Pgrantotal = Pgran + Pgranalias
ptot = (0.5*2.94*amp**2.*((2.*env_width)/dnu)*eta**2.) / (dilution**2.)
Binstr = 2.0 * (noise)**2. * cadence*10**-6.0 # from (6) eqn 18
bgtot = ((Binstr + Pgrantotal) * 2.*env_width) # units are ppm**2
snr = ptot/bgtot # global signal to noise ratio from (11)
fap = 0.05 # false alarm probability
pdet = 1.0 - fap
    pfinal = np.full(rad.shape[0], -99.0)  # float fill value so the probabilities assigned below are not truncated to integers
idx = np.where(max_T != 0) # calculate the indexes where T is not 0
tlen=max_T[idx]*27.4*86400.0 # the length of the TESS observations in seconds
bw=1.0 * (10.0**6.0)/tlen
nbins=(2.*env_width[idx]/bw).astype(int) # from (11)
snrthresh = stats.chi2.ppf(pdet, 2.0*nbins) / (2.0*nbins) - 1.0
pfinal[idx] = stats.chi2.sf((snrthresh+1.0) / (snr[idx]+1.0)*2.0*nbins, 2.*nbins)
return pfinal, snr, dnu # snr is needed in TESS_telecon2.py
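# Hedged end-to-end sketch (illustration only): wires seismicParameters() and
# tess_field_only() into globalDetections() for a single bright (Imag = 6)
# Sun-like target. The coordinates, magnitude, dilution and sys_limit values
# are assumptions made up purely to exercise the pipeline.
def _example_global_detection_single_star():
    teff = np.array([5777.0])
    lum = np.array([1.0])
    e_lng, e_lat = np.array([30.0]), np.array([40.0])
    g_lng, g_lat = np.array([96.0]), np.array([-30.0])
    imag = np.array([6.0])
    (cadence, vnyq, rad, numax, teffred, teff_solar,
     teffred_solar, numax_solar, dnu_solar) = seismicParameters(teff, lum)
    T, max_T = tess_field_only(e_lng, e_lat)
    pdet, snr, dnu = globalDetections(g_lng, g_lat, e_lng, e_lat, imag, lum, rad, teff,
                                      numax, max_T, teffred, teff_solar, teffred_solar,
                                      numax_solar, dnu_solar, sys_limit=0.0, dilution=1.0,
                                      vnyq=vnyq, cadence=cadence, vary_beta=True)
    return pdet, snr, dnu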
def BV2VI(bv, vmag, g_mag_abs):
whole = pd.DataFrame(data={'B-V': bv, 'Vmag': vmag, 'g_mag_abs': g_mag_abs, 'Ai': 0})
# Mg: empirical relation from Tiago to separate dwarfs from giants
# note: this relation is observational; it was made with REDDENED B-V and g_mag values
whole['Mg'] = 6.5*whole['B-V'] - 1.8
# B-V-to-teff limits from (6) fig 5
whole = whole[(whole['B-V'] > -0.4) & (whole['B-V'] < 1.7)]
print(whole.shape, 'after B-V cuts')
# B-V limits for dwarfs and giants, B-V conditions from (1)
# if a star can't be classified as dwarf or giant, remove it
condG = (whole['B-V'] > -0.25) & (whole['B-V'] < 1.75) & (whole['Mg'] > whole['g_mag_abs'])
condD1 = (whole['B-V'] > -0.23) & (whole['B-V'] < 1.4) & (whole['Mg'] < whole['g_mag_abs'])
condD2 = (whole['B-V'] > 1.4) & (whole['B-V'] < 1.9) & (whole['Mg'] < whole['g_mag_abs'])
whole = pd.concat([whole[condG], whole[condD1], whole[condD2]], axis=0)
print(whole.shape, 'after giant/dwarf cuts')
whole['V-I'] = 100. # write over these values for dwarfs and giants separately
# coefficients for giants and dwarfs
cg = [-0.8879586e-2, 0.7390707, 0.3271480, 0.1140169e1, -0.1908637, -0.7898824,
0.5190744, 0.5358868]
cd1 = [0.8906590e-1, 0.1319675e1, 0.4461807, -0.1188127e1, 0.2465572, 0.8478627e1,
0.1046599e2, 0.3641226e1]
cd2 = [-0.5421588e2, 0.8011383e3, -0.4895392e4, 0.1628078e5, -0.3229692e5,
0.3939183e5, -0.2901167e5, 0.1185134e5, -0.2063725e4]
# calculate (V-I) for giants
x = whole['B-V'][condG] - 1
y = (cg[0] + cg[1]*x + cg[2]*(x**2) + cg[3]*(x**3) + cg[4]*(x**4) +\
cg[5]*(x**5) + cg[6]*(x**6) + cg[7]*(x**7))
    whole.loc[condG, 'V-I'] = y + 1
x, y = [[] for i in range(2)]
# calculate (V-I) for dwarfs (1st B-V range)
x = whole['B-V'][condD1] - 1
y = (cd1[0] + cd1[1]*x + cd1[2]*(x**2) + cd1[3]*(x**3) + cd1[4]*(x**4) +\
cd1[5]*(x**5) + cd1[6]*(x**6) + cd1[7]*(x**7))
    whole.loc[condD1, 'V-I'] = y + 1
x, y = [[] for i in range(2)]
# calculate (V-I) for dwarfs (2nd B-V range)
x = whole['B-V'][condD2] - 1
y = (cd2[0] + cd2[1]*x + cd2[2]*(x**2) + cd2[3]*(x**3) + cd2[4]*(x**4) +\
cd2[5]*(x**5) + cd2[6]*(x**6) + cd2[7]*(x**7) + cd2[8]*(x**8))
    whole.loc[condD2, 'V-I'] = y + 1
x, y = [[] for i in range(2)]
# calculate Imag from V-I and reredden it
whole['Imag'] = whole['Vmag']-whole['V-I']
whole['Imag_reddened'] = whole['Imag'] + whole['Ai']
"""
# make Teff, luminosity, Plx and ELat cuts to the data
whole = whole[(whole['teff'] < 7700) & (whole['teff'] > 4300) & \
(whole['Lum'] > 0.3) & (whole['lum_D'] < 50) & ((whole['e_Plx']/whole['Plx']) < 0.5) \
& (whole['Plx'] > 0.) & ((whole['ELat']<=-6.) | (whole['ELat']>=6.))]
print(whole.shape, 'after Teff/L/Plx/ELat cuts')
"""
whole.drop(['Ai', 'Imag_reddened', 'Mg'], axis=1, inplace=True)
    return whole.values.T  # .as_matrix() was removed in modern pandas; .values keeps the same behaviour
# make cuts to the data
def cuts(teff, e_teff, metal, e_metal, g_lng, g_lat, e_lng, e_lat, Tmag, e_Tmag, Vmag, e_Vmag, plx, e_plx, lum, star_name):
d = {'teff':teff, 'e_teff':e_teff, 'metal':metal, 'e_metal':e_metal, 'g_lng':g_lng, 'g_lat':g_lat, 'e_lng':e_lng, 'e_lat':e_lat,
'Tmag':Tmag, 'e_Tmag':e_Tmag, 'Vmag':Vmag, 'e_Vmag':e_Vmag, 'plx':plx, 'e_plx':e_plx, 'lum':lum, 'star_name':star_name}
whole = pd.DataFrame(d, columns = ['teff', 'e_teff', 'metal', 'e_metal', 'g_lng', 'g_lat', 'e_lng', 'e_lat',
'Tmag', 'e_Tmag', 'Vmag', 'e_Vmag', 'plx', 'e_plx', 'lum', 'star_name'])
whole = whole[(whole['teff'] < 7700.) & (whole['teff'] > 4300.) & (whole['e_teff'] > 0.) & \
(whole['lum'] > 0.3) & (whole['lum'] < 50.) & ((whole['e_plx']/whole['plx']) < 0.5) & \
(whole['plx'] > 0.) & ((whole['e_lat']<=-6.) | (whole['e_lat']>=6.)) & \
(whole['Tmag'] > 3.5) & (whole['e_metal'] > 0.)]
print(whole.shape, 'after cuts to the data')
    return whole.values.T  # .as_matrix() was removed in modern pandas
if __name__ == '__main__':
df = pd.read_csv('files/MAST_Crossmatch_TIC4.csv', header=0,
index_col=False)
data = df.values
# star_name = data[:, 1]
teff = pd.to_numeric(data[:, 88])
# e_teff = pd.to_numeric(data[:, 89])
# metal = pd.to_numeric(data[:, 92])
# e_metal = pd.to_numeric(data[:, 93])
# g_lng = pd.to_numeric(data[:, 48])
# g_lat = pd.to_numeric(data[:, 49])
# e_lng = pd.to_numeric(data[:, 50])
# e_lat = pd.to_numeric(data[:, 51])
# Tmag = pd.to_numeric(data[:, 84])
# e_Tmag = pd.to_numeric(data[:, 85])
Vmag = pd.to_numeric(data[:, 54])
# e_Vmag = pd.to_numeric(data[:, 55])
plx = pd.to_numeric(data[:, 45])
e_plx = pd.to_numeric(data[:, 46])
lum, e_lum = Teff2bc2lum(teff, plx, e_plx, Vmag)
df[' Luminosity'] = pd.Series(lum)
df[' Luminosity Err.'] = pd.Series(e_lum)
# teff, e_teff, metal, e_metal, g_lng, g_lat, e_lng, e_lat, Tmag, e_Tmag, \
# Vmag, e_Vmag, plx, e_plx, lum, star_name = cuts(teff, e_teff, metal, e_metal,
# g_lng, g_lat, e_lng, e_lat, Tmag, e_Tmag, Vmag, e_Vmag,
# plx, e_plx, lum, star_name)
# make cuts to the data
df = df[(df[' T_eff'] < 7700.) & (df[' T_eff'] > 4300.) & (df[' T_eff Err.'] > 0.) & \
(df[' Luminosity'] > 0.3) & (df[' Luminosity'] < 50.) & ((df[' Parallax Err.']/df[' Parallax']) < 0.5) & \
(df[' Parallax'] > 0.) & ((df[' Ecl. Lat.']<=-6.) | (df[' Ecl. Lat.']>=6.)) & \
(df[' TESS Mag.'] > 3.5) & (df[' Metallicity Err.'] > 0.)]
df = df.reset_index(drop=True)
print(df.shape, 'after cuts to the data')
data = df.values
    teff = pd.to_numeric(data[:, 88])
"""
The BIGMACC script.
"""
import os
import pandas as pd
import time
import logging
logging.getLogger('numba').setLevel(logging.WARNING)
import shutil
import cea.config
import cea.utilities
import cea.inputlocator
import cea.demand.demand_main
import cea.resources.radiation_daysim.radiation_main
import cea.bigmacc.copy_results
import cea.bigmacc.bigmacc_rules
import cea.datamanagement.archetypes_mapper
import cea.datamanagement.data_initializer
import cea.analysis.costs.system_costs
import cea.analysis.lca.main
import cea.demand.schedule_maker.schedule_maker as schedule_maker
import cea.bigmacc.bigmacc_util as util
import distutils
import cea.technologies.solar.photovoltaic as photovoltaic
import cea.resources.water_body_potential as water
from distutils import dir_util
__author__ = "<NAME>"
__copyright__ = ""
__credits__ = ["<NAME>"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = ""
__email__ = ""
__status__ = ""
def run(config):
print('Key in run')
print(config.bigmacc.key)
"""
This is the main entry point to your script. Any parameters used by your script must be present in the ``config``
parameter. The CLI will call this ``main`` function passing in a ``config`` object after adjusting the configuration
to reflect parameters passed on the command line / user interface
:param config:
:type config: cea.config.Configuration
:return:
"""
locator = cea.inputlocator.InputLocator(config.scenario)
i = config.bigmacc.key
print('i')
print(i)
# SCENARIO SETUP ---
scen_check = pd.read_csv(os.path.join(config.bigmacc.keys, 'logger.csv'), index_col='Unnamed: 0')
experiment_key = 'exp_{}'.format(i)
print(experiment_key)
if experiment_key in scen_check['Experiments'].values.tolist():
print('Experiment was finished previously, moving to next.')
pass
else:
print('START: experiment {}.'.format(i))
# INITIALIZE TIMER ---
t0 = time.perf_counter()
if os.path.exists(os.path.join(config.bigmacc.keys, i)):
print(' - Folder exists for experiment {}.'.format(i))
else:
os.mkdir(os.path.join(config.bigmacc.keys, i))
print(' - Folder does not exist for experiment {}, creating now.'.format(i))
# run the archetype mapper to leverage the newly loaded typology file and set parameters
print(' - Running archetype mapper for experiment {} to remove changes made in the last experiment.'.format(i))
cea.datamanagement.archetypes_mapper.main(config)
# run the rule checker to set the scenario parameters
print(' - Running rule checker for experiment {}.'.format(i))
cea.bigmacc.bigmacc_rules.main(config)
# SIMULATIONS ---
print(' - Run radiation is {}.'.format(config.bigmacc.runrad))
print(' - Write sensor data is {}.'.format(config.radiation.write_sensor_data))
# checking on need for radiation simulation
if config.bigmacc.runrad == True:
print(' - Running radiation simulation for experiment {}.'.format(i))
if os.path.exists(locator.get_radiation_building('B000')):
print(' - Radiation folder exists for experiment {}, using that.'.format(i))
else:
print(' - Radiation running for experiment {}.'.format(i))
cea.resources.radiation_daysim.radiation_main.main(config)
else:
radfiles = config.bigmacc.copyrad
print(' - Copying radiation results from {}.'.format(radfiles))
distutils.dir_util.copy_tree(radfiles, locator.get_solar_radiation_folder())
# shutil.copy(radfiles, locator.get_solar_radiation_folder())
print(' - Experiment {} does not require new radiation simulation.'.format(i))
# running demand forecasting
if os.path.exists(locator.get_schedule_model_file('B000')):
print(' - Schedules exist for experiment {}.'.format(i))
else:
print(' - Schedule maker running for experiment {}.'.format(i))
schedule_maker.main(config)
print(' - Running demand simulation for experiment {}.'.format(i))
cea.demand.demand_main.main(config)
print(' - Run PV is {}.'.format(config.bigmacc.pv))
# if PV simulation is needed, run it.
if config.bigmacc.pv == True:
print(' - Running PV simulation for experiment {}.'.format(i))
photovoltaic.main(config)
print('Run water-body exchange is {}.'.format(config.bigmacc.water))
# if water-body simulation is needed, run it.
if config.bigmacc.water == True:
print(' - Running water body simulation for experiment {}.'.format(i))
water.main(config)
# running the emissions and costing calculations
cea.analysis.costs.system_costs.main(config)
cea.analysis.lca.main.main(config)
# clone out the simulation inputs and outputs directory
print(' - Transferring results directory for experiment {}.'.format(i))
inputs_path = os.path.join(config.bigmacc.keys, i, config.general.scenario_name, 'inputs')
outputs_path = os.path.join(config.bigmacc.keys, i, config.general.scenario_name, 'outputs', 'data')
# costs_path = os.path.join(config.bigmacc.keys, i, 'outputs', 'data', 'costs')
# demand_path = os.path.join(config.bigmacc.keys, i, 'outputs', 'data', 'demand')
# emissions_path = os.path.join(config.bigmacc.keys, i, 'outputs', 'data', 'emissions')
# rad_path = os.path.join(config.bigmacc.keys, i, 'outputs', 'data', 'solar-radiation')
distutils.dir_util.copy_tree(locator.get_data_results_folder(), outputs_path)
distutils.dir_util.copy_tree(locator.get_input_folder(), inputs_path)
time_elapsed = time.perf_counter() - t0
log_df = pd.read_csv(os.path.join(config.bigmacc.keys, 'logger.csv'),
index_col='Unnamed: 0')
log_df = log_df.append(pd.DataFrame({'Experiments': 'exp_{}'.format(i),
'Completed': 'True',
'Experiment Time': '%d.2 seconds' % time_elapsed,
'Unique Radiation': config.bigmacc.runrad}, index=[0]), ignore_index=True)
log_df.to_csv(os.path.join(config.bigmacc.keys, 'logger.csv'))
log_df.to_csv(r"C:\Users\justi\Desktop\126logger_backup.csv",)
# delete results
shutil.rmtree(locator.get_costs_folder())
shutil.rmtree(locator.get_demand_results_folder())
shutil.rmtree(locator.get_lca_emissions_results_folder())
shutil.rmtree(locator.get_solar_radiation_folder())
shutil.rmtree(locator.get_potentials_folder())
keys = [int(x) for x in str(i)]
if keys[0] == 1:
cea.datamanagement.data_initializer.main(config)
else:
pass
print('END: experiment {}. \n'.format(i))
def main(config):
print('STARTING UP THE BIGMACC SCRIPT')
cea.datamanagement.data_initializer.main(config)
key_list = util.generate_key_list(config)
if os.path.exists(os.path.join(config.bigmacc.keys, 'logger.csv')):
pass
else:
        initialdf = pd.DataFrame(columns=['Experiments', 'Completed', 'Experiment Time', 'Unique Radiation'])
import os
import numpy as np
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from .. import read_sql
@pytest.fixture(scope="module") # type: ignore
def sqlite_db() -> str:
conn = os.environ["SQLITE_URL"]
return conn
def test_read_sql_without_partition(sqlite_db: str) -> None:
query = "SELECT test_int, test_nullint, test_str, test_float, test_bool, test_date, test_time, test_datetime FROM test_table"
df = read_sql(sqlite_db, query)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([1, 2, 0, 3, 4, 1314], dtype="Int64"),
"test_nullint": pd.Series([3, None, 5, 7, 9, 2], dtype="Int64"),
"test_str": pd.Series(
["str1", "str2", "こんにちは", "b", "Ha好ち😁ðy̆", None], dtype="object"
),
"test_float": | pd.Series([None, 2.2, 3.1, 3, 7.8, -10], dtype="float64") | pandas.Series |
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import pandas as pd
import plotly.express as px
import plotly.graph_objs as go
# Importing csv as a Dataframe
covid_deaths = pd.read_csv('./data/fallecidos_covid.csv', delimiter=';')
# Formatting date column as datetime type
covid_deaths['FECHA_FALLECIMIENTO'] = pd.to_datetime(covid_deaths['FECHA_FALLECIMIENTO'], format='%Y%m%d')
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""
Implement DataFrame public API as Pandas does.
Almost all docstrings for public and magic methods should be inherited from Pandas
for better maintability. So some codes are ignored in pydocstyle check:
- D101: missing docstring in class
- D102: missing docstring in public method
- D105: missing docstring in magic method
Manually add documentation for methods which are not presented in pandas.
"""
import pandas
from pandas.core.common import apply_if_callable
from pandas.core.dtypes.common import (
infer_dtype_from_object,
is_dict_like,
is_list_like,
is_numeric_dtype,
)
from pandas.core.indexes.api import ensure_index_from_sequences
from pandas.util._validators import validate_bool_kwarg
from pandas.io.formats.printing import pprint_thing
from pandas._libs.lib import no_default
from pandas._typing import Label
import itertools
import functools
import numpy as np
import sys
from typing import Optional, Sequence, Tuple, Union, Mapping
import warnings
from modin.error_message import ErrorMessage
from modin.utils import _inherit_docstrings, to_pandas, hashable
from modin.config import IsExperimental
from .utils import (
from_pandas,
from_non_pandas,
)
from .iterator import PartitionIterator
from .series import Series
from .base import BasePandasDataset, _ATTRS_NO_LOOKUP
from .groupby import DataFrameGroupBy
from .accessor import CachedAccessor, SparseFrameAccessor
@_inherit_docstrings(pandas.DataFrame, excluded=[pandas.DataFrame.__init__])
class DataFrame(BasePandasDataset):
def __init__(
self,
data=None,
index=None,
columns=None,
dtype=None,
copy=False,
query_compiler=None,
):
"""
Distributed DataFrame object backed by Pandas dataframes.
Parameters
----------
data: NumPy ndarray (structured or homogeneous) or dict:
Dict can contain Series, arrays, constants, or list-like
objects.
index: pandas.Index, list, ObjectID
The row index for this DataFrame.
columns: pandas.Index
The column names for this DataFrame, in pandas Index object.
dtype: Data type to force.
Only a single dtype is allowed. If None, infer
copy: bool
Copy data from inputs. Only affects DataFrame / 2d ndarray input.
query_compiler: query_compiler
A query compiler object to manage distributed computation.
"""
if isinstance(data, (DataFrame, Series)):
self._query_compiler = data._query_compiler.copy()
if index is not None and any(i not in data.index for i in index):
raise NotImplementedError(
"Passing non-existant columns or index values to constructor not"
" yet implemented."
)
if isinstance(data, Series):
# We set the column name if it is not in the provided Series
if data.name is None:
self.columns = [0] if columns is None else columns
# If the columns provided are not in the named Series, pandas clears
# the DataFrame and sets columns to the columns provided.
elif columns is not None and data.name not in columns:
self._query_compiler = from_pandas(
DataFrame(columns=columns)
)._query_compiler
if index is not None:
self._query_compiler = data.loc[index]._query_compiler
elif columns is None and index is None:
data._add_sibling(self)
else:
if columns is not None and any(i not in data.columns for i in columns):
raise NotImplementedError(
"Passing non-existant columns or index values to constructor not"
" yet implemented."
)
if index is None:
index = slice(None)
if columns is None:
columns = slice(None)
self._query_compiler = data.loc[index, columns]._query_compiler
# Check type of data and use appropriate constructor
elif query_compiler is None:
distributed_frame = from_non_pandas(data, index, columns, dtype)
if distributed_frame is not None:
self._query_compiler = distributed_frame._query_compiler
return
warnings.warn(
"Distributing {} object. This may take some time.".format(type(data))
)
if is_list_like(data) and not is_dict_like(data):
old_dtype = getattr(data, "dtype", None)
values = [
obj._to_pandas() if isinstance(obj, Series) else obj for obj in data
]
if isinstance(data, np.ndarray):
data = np.array(values, dtype=old_dtype)
else:
try:
data = type(data)(values, dtype=old_dtype)
except TypeError:
data = values
elif is_dict_like(data) and not isinstance(
data, (pandas.Series, Series, pandas.DataFrame, DataFrame)
):
data = {
k: v._to_pandas() if isinstance(v, Series) else v
for k, v in data.items()
}
pandas_df = pandas.DataFrame(
data=data, index=index, columns=columns, dtype=dtype, copy=copy
)
self._query_compiler = from_pandas(pandas_df)._query_compiler
else:
self._query_compiler = query_compiler
def __repr__(self):
from pandas.io.formats import console
num_rows = pandas.get_option("display.max_rows") or 10
num_cols = pandas.get_option("display.max_columns") or 20
if pandas.get_option("display.max_columns") is None and pandas.get_option(
"display.expand_frame_repr"
):
width, _ = console.get_console_size()
width = min(width, len(self.columns))
col_counter = 0
i = 0
while col_counter < width:
col_counter += len(str(self.columns[i])) + 1
i += 1
num_cols = i
i = len(self.columns) - 1
col_counter = 0
while col_counter < width:
col_counter += len(str(self.columns[i])) + 1
i -= 1
num_cols += len(self.columns) - i
result = repr(self._build_repr_df(num_rows, num_cols))
if len(self.index) > num_rows or len(self.columns) > num_cols:
# The split here is so that we don't repr pandas row lengths.
return result.rsplit("\n\n", 1)[0] + "\n\n[{0} rows x {1} columns]".format(
len(self.index), len(self.columns)
)
else:
return result
def _repr_html_(self): # pragma: no cover
num_rows = pandas.get_option("max_rows") or 60
num_cols = pandas.get_option("max_columns") or 20
"""
Analysis functions for pom data
05/09/2018
"""
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
" Creating dataframes and aggregating population biomarker data per simulation "
def fill_in_grouped_sim_data(grouped_sim_data, results, biomarker,
sim_types, sim_type_amp_relationship,
sim_name_conversions,
how_to_fill, filter_biomarkers_for_outliers=False, threshold=None):
""""
Fill in a dataframe with aggregated biomarker data
"""
# Check inputs
assert any([amp_scale_factor == 1 for _,amp_scale_factor in sim_type_amp_relationship.items()]), "No base value of 1 for any of the sim types in sim_type_amp_relationship"
for name in grouped_sim_data.index:
# To do here: get simulations from the conversion dict, do the appropriate averaging for the how_to_fill method, add that data in
# Then, get gnav18 from the simulation short name and the amplitudes from the long names using the conversion dict.
# Add these in too.
# And we're done
#names =
# Get simulations
simulations = sim_name_conversions[name]
for simulation, sim_type in simulations.items():
# Input biomarker data
unaggregated_data = results.loc[:,(simulation,biomarker)]
if threshold:
# Only keep rows at or below the threshold
unaggregated_data = unaggregated_data[unaggregated_data <= threshold]
# filtering of biomarkers for outliers
if filter_biomarkers_for_outliers:
if biomarker == "APHalfWidth":
# Remove outliers
outlier_definition = get_outlier_definition(biomarker) # ms
outliers = unaggregated_data >= outlier_definition
num_outliers = outliers.sum()
mask = ~outliers # Invert boolean series
unaggregated_data = unaggregated_data[mask]
#if num_outliers > 0:
#print("Removed {} AP Half Width outliers > {}".format(num_outliers, outlier_definition))
if how_to_fill == 'mean':
data = unaggregated_data.mean()
elif how_to_fill == 'std':
data = unaggregated_data.std()
elif how_to_fill == 'median':
data = unaggregated_data.median()
elif how_to_fill == 'mean_fillna':
data = unaggregated_data.fillna(0).mean()
elif how_to_fill == 'mean_freq_fillna':
assert biomarker == 'ISI', "Biomarker for frequency needs to be ISI not {}".format(biomarker)
# Convert to frequency, then fill nans with 0s and take mean
unaggregated_data = 1000.0/unaggregated_data
data = unaggregated_data.fillna(0).mean()
elif how_to_fill == 'mean_freq_dropna':
assert biomarker == 'ISI'
# Convert to frequency, then DROP nans and take mean
unaggregated_data = 1000.0/unaggregated_data
data = unaggregated_data.dropna().mean()
elif isinstance(how_to_fill, int):
# Model index
data = unaggregated_data.loc[how_to_fill]
else:
raise ValueError("How to fill method: {} not supported.".format(how_to_fill))
grouped_sim_data.at[name,(biomarker, sim_type)] = data
# Input amplitudes
amp = get_amplitude(simulation, amp_units='pA', delimiter='_')
grouped_sim_data.at[name, ('Amp', sim_type)] = amp
# Input scaling factors
scaling_factors = list(grouped_sim_data['Scaling factors'].columns)
for scaling_factor in scaling_factors:
# Get scaling factor
scaling_factor_value = get_parameter_scaling(simulation, scaling_factor, delimiter='_')
grouped_sim_data.at[name, ('Scaling factors', scaling_factor)] = scaling_factor_value
def make_grouped_sim_data(pop, biomarker='APFullWidth', agg='mean', filter_outliers=False, scaled_parameters=['GNav18'], threshold=None):
" Aggregate population biomarker results per simulation to analyse at a per simulation level over the ensemble population. "
sim_types = ['step', 'ramp'] # First entry in list is the base
sim_type_as_base = sim_types[0] # Use step as base as ramp has amplitude x10 of step
sim_type_amp_relationship = {'step':1, 'ramp':10}
assert sim_type_amp_relationship[sim_type_as_base] == 1
grouped_sim_data = make_empty_grouped_sim_data( pop=pop,
biomarker=biomarker,
filter_outliers=filter_outliers,
scaled_parameters=scaled_parameters,
sim_types=sim_types,
sim_type_as_base=sim_type_as_base,
sim_type_amp_relationship=sim_type_amp_relationship,
)
sim_name_conversions = make_sim_name_conversions (pop.get_simulation_names(),
sim_types,
sim_type_amp_relationship,
sim_type_as_base
)
fill_in_grouped_sim_data(grouped_sim_data, pop.results, biomarker,
sim_types, sim_type_amp_relationship,
sim_name_conversions,
how_to_fill=agg,
filter_biomarkers_for_outliers=filter_outliers,
threshold=threshold)
if biomarker != 'Firing pattern':
grouped_sim_data = grouped_sim_data.astype(float)
return grouped_sim_data
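# Hedged usage sketch of the aggregation pipeline above. `pop` is assumed to be a
# population-of-models object exposing `results` and `get_simulation_names()`, as
# used throughout this module; the biomarker and aggregation choices are
# illustrative only, not project defaults.
def _example_grouped_sim_data(pop):
    " Illustrative only: mean AP half width per simulation, with outlier filtering. "
    grouped = make_grouped_sim_data(pop, biomarker='APHalfWidth', agg='mean',
                                    filter_outliers=True, scaled_parameters=['GNav18'])
    # Columns form a MultiIndex: (biomarker, sim_type), ('Amp', sim_type), ('Scaling factors', parameter)
    return grouped[('APHalfWidth', 'step')].describe()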
def make_empty_grouped_sim_data(pop, biomarker='APFullWidth', filter_outliers=False,
scaled_parameters=['GNav18'], sim_types=['step','ramp'],
sim_type_as_base='step', sim_type_amp_relationship={'step':1, 'ramp':10},
):
" Aggregate population biomarker results per simulation to analyse at a per simulation level over the ensemble population. "
arrays =[[biomarker]*len(sim_types)+['Amp']*len(sim_types) + ['Scaling factors']*len(scaled_parameters),sim_types*2 + scaled_parameters] # Build multiarray columns
columns = pd.MultiIndex.from_arrays(arrays, names=['',''])
sim_names = pop.get_simulation_names()
sim_name_conversions = make_sim_name_conversions(sim_names, sim_types, sim_type_amp_relationship, sim_type_as_base)
short_sim_names = sorted(list(sim_name_conversions.keys()))
grouped_sim_data = pd.DataFrame(columns=columns, index=short_sim_names)
return grouped_sim_data
def make_sim_name_conversions(sim_names, sim_types, sim_type_amp_relationship, sim_type_as_base):
" Make conversion dict from short sim names to full sim names with sim type "
sim_name_conversions = {}
# Get list of sim_names common to step and ramp
# Complexity: ramp and step amps are different so we will just get the name from step
for sim_type in sim_types:
for sim_name in sim_names:
# Ignore rheobase simulations and do base sim type first - important to get order right in sim_name_conversions
if (sim_name not in sim_types) & (sim_type in sim_name):
short_sim_name, _sim_type = process_sim_name(sim_name, sim_types, sim_type_amp_relationship, amp_units='pA', delimiter='_')
assert _sim_type == sim_type
# Build up conversion dict from short name to full names and store sim_type
if short_sim_name in sim_name_conversions.keys():
sim_name_conversions[short_sim_name][sim_name] = sim_type
else:
assert sim_type == sim_type_as_base, "Sim type: {}, name:{}, short_sim_name:{}".format(sim_type, sim_name, short_sim_name)
sim_name_conversions[short_sim_name] = {sim_name:sim_type}
return sim_name_conversions
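# The mapping built above has the shape {short_sim_name: {full_sim_name: sim_type}}.
# An illustrative (not real) instance, the exact key formatting being delegated to
# process_sim_name defined elsewhere in this package:
#   {'GNav18_1.0_100pA': {'GNav18_1.0_100pA_step': 'step',
#                         'GNav18_1.0_1000pA_ramp': 'ramp'}}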
" Data processing "
def process_firing_pattern_data(firing_pattern_percentages, sim_types=['step','ramp'],
sim_type_amp_relationship = {'step':1, 'ramp':10}, scaled_parameters=['GNav18'],
):
"""
Process simulation names to extract simulation parameters and rename to remove stimulation protocol from name
Flow:
1. Convert sim names by removing sim type and storing the conversion between full and shortened names.
2. Create a formatted dataframe and fill in the firing pattern percentages from the values in the original dataframe.
3. Extract simulation parameters from the simulation name and add to the formatted dataframe.
TODO: This code shares a lot of code with the functions for aggregating biomarker data. Could refactor into one set of functions
sharing common code.
"""
# TODO: Could turn these lines creating sim_names and sim_name_conversions
# into a function shared with similar code for biomarkers
sim_type_as_base = sim_types[0]
sim_names = firing_pattern_percentages.index.tolist()
short_sim_names = [] # Simulation names without stimulation protocol
sim_name_conversions = {} # Conversion between full and short sim names
for sim_type in sim_types:
for sim_name in sim_names:
if (sim_name not in sim_types) & (sim_type in sim_name): # Remove rheobase simulations
short_sim_name, _sim_type = process_sim_name(sim_name, sim_types, sim_type_amp_relationship, amp_units='pA', delimiter='_')
assert _sim_type == sim_type
# Create conversion between names
if short_sim_name in sim_name_conversions.keys():
sim_name_conversions[short_sim_name][sim_name] = sim_type
else:
assert sim_type == sim_type_as_base, (
"Sim type: {}, name:{}, short_sim_name:{} is not the base sim type for the sim type amp relationship.".format(
sim_type, sim_name, short_sim_name))
sim_name_conversions[short_sim_name] = {sim_name:sim_type}
if sim_type == sim_type_as_base: # Only add step to sim_names to avoid adding ramp as ramp has different amplitude
short_sim_names.append(short_sim_name)
short_sim_names = sorted(short_sim_names)
formatted_firing_pattern_data = format_firing_pattern_percentages(
firing_pattern_percentages,
short_sim_names,
sim_name_conversions,
scaled_parameters,
sim_types,
sim_type_amp_relationship,
)
return formatted_firing_pattern_data
def format_firing_pattern_percentages(firing_pattern_percentages, short_sim_names, sim_name_conversions, scaled_parameters, sim_types, sim_type_amp_relationship):
"""
Fill in a dataframe with firing pattern percentages for each simulation
Equivalent to fill_in_grouped_sim_data() but for firing patterns not single numeric biomarkers.
Copy code from fill_in_grouped_sim_data where needed but:
1. We don't need a how to fill option as we aggregate by percentages always.
2. We do need to fill in for all firing patterns, not just one biomarker.
"""
" Create formatted dataframe with column multiindex "
assert len(scaled_parameters) == 1, "Multiple scaled parameters not supported by format_firing_percentages yet"
firing_pattern_names = firing_pattern_percentages.columns.tolist()
sim_type_as_base = sim_types[0] # First sim type in list is the base
assert sim_type_amp_relationship[sim_type_as_base] == 1
arrays = [list(np.repeat(firing_pattern_names,len(sim_types))) + ['Amp','Amp', 'Scaling factors'],
sim_types*(1+len(firing_pattern_names)) + scaled_parameters] # Build multiarray columns
columns = pd.MultiIndex.from_arrays(arrays, names=['',''])
formatted_firing_pattern_data = pd.DataFrame(index=short_sim_names, columns=columns)
" Fill in firing pattern percentages; get and fill in simulation parameters from simulation names "
for name in formatted_firing_pattern_data.index:
simulations = sim_name_conversions[name]
for simulation, sim_type in simulations.items():
# Fill in firing pattern percentage
for fp_name in firing_pattern_names:
formatted_firing_pattern_data.at[name, (fp_name, sim_type)] = firing_pattern_percentages.at[simulation,fp_name]
# Fill in stimulus amplitude
amp = get_amplitude(simulation, amp_units='pA', delimiter='_')
formatted_firing_pattern_data.at[name, ('Amp', sim_type)] = amp
# Fill in parameter scaling
scaling_factors = list(formatted_firing_pattern_data['Scaling factors'].columns)
for scaling_factor in scaling_factors:
scaling_factor_value = get_parameter_scaling(simulation, scaling_factor, delimiter='_')
formatted_firing_pattern_data.at[name, ('Scaling factors', scaling_factor)] = scaling_factor_value
return formatted_firing_pattern_data
def define_region(pop, firing_pattern_thresholds={}, other_thresholds={}, stim_type='step', verbose=False):
"""
Use some condition e.g. % of models with a given firing pattern to define a region of simulation space to analyse further.
"""
import operator
opcodes = {'>':operator.gt, '>=':operator.ge, '<':operator.lt, '<=':operator.le}
firing_pattern_percentages = get_firing_pattern_percentages(pop) # Excludes rheobase simulations
firing_pattern_percentages = process_firing_pattern_data(firing_pattern_percentages)
region = {}
# Check each simulation against all thresholds, if it passes them all then add to defined region
simulations = firing_pattern_percentages.index # Already excluded rheobase simulations and parameters in process_firing_pattern_data
count = 0
for simulation in simulations:
accept_simulation = True
# Firing pattern thresholds
for firing_pattern, threshold_vals in firing_pattern_thresholds.items():
op = opcodes[threshold_vals[0]]
threshold = threshold_vals[1]
fp_percentage = firing_pattern_percentages.loc[simulation,(firing_pattern, stim_type)]
# Check if threshold is accepted
if not op(fp_percentage, threshold):
accept_simulation = False
# Other thresholds (aggregated biomarkers)
for threshold, val in other_thresholds.items():
print('Other thresholds (aggregated biomarkers) not implemented yet.')
# Get aggregated biomarker data and do the same operation on it as for firing pattern but with biomarker values not percentages
if accept_simulation:
count += 1
amplitude = firing_pattern_percentages.loc[simulation,('Amp', stim_type)]
scaling_factors = dict(firing_pattern_percentages.loc[simulation, 'Scaling factors'])
region[simulation] = {'Amp':amplitude}
for scaling_factor, val in scaling_factors.items():
region[simulation][scaling_factor] = val
if verbose:
print('{} simulations out of {} accepted.'.format(count, len(simulations)))
return region
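# Hedged usage sketch for define_region. The firing pattern label and the 50%
# threshold are illustrative assumptions; `pop` is the same population-of-models
# object assumed elsewhere in this module.
def _example_define_region(pop):
    " Illustrative only: simulations where at least half the models fire repetitively. "
    region = define_region(pop,
                           firing_pattern_thresholds={'repetitive': ['>=', 50.0]},
                           stim_type='step',
                           verbose=True)
    return region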
def get_biomarker_values_by_simulation(results, biomarker_names, simulation_names):
"""
Transform pom results dataframe to turn make the simulation name a categorical column rather than a multiindex level
Examples
1 - simple:
pop = pom.load('nav18_project_2_1_1.pickle')
get_biomarker_values_by_simulation(pop.results, biomarker_names='APPeak', simulation_names=['ramp','step'])
2 - multiple biomarkers, list comprehension for simulation names:
get_biomarker_values_by_simulation(pop.results,
biomarker_names=['APPeak', 'Threshold', 'Firing pattern'],
simulation_names=[name for name in pop.results.columns.levels[0]
if name not in ['Parameters', 'step', 'ramp']]
)
"""
data = results.copy()
data = data.swaplevel(0, 1, axis=1) # Swap so top column level is biomarker not simulation
data = data.sort_index(level=0,axis=1) # Group under each biomarker
data = data.loc[:,(biomarker_names, simulation_names)] # Get only needed biomarker and simulations
data = data.stack().reset_index().set_index('Model') # Remove simulation from column index and turn it into column
data = data.rename(columns={'':'Simulation'}) # Name the Simulation column
return data
def pivot_dataframe_for_heatmap(df, x, y, z, labels):
"""
Pivot a dataframe and drop unwanted components so the the columns are x, the rows are y, and the element values are z.
Labels should be axis names ordered in a list [x,y,z].
"""
df_labels = [x, y, z]
heatmap_df = pd.DataFrame()
for df_label, heatmap_label in zip(df_labels, labels):
heatmap_df[heatmap_label] = df[df_label]
# Pivot
heatmap_df = heatmap_df.pivot(index=labels[1], columns=labels[0], values=labels[2])
heatmap_df = heatmap_df.iloc[::-1] # Reverse index to have origin at 0 0
return heatmap_df
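# Hedged usage sketch for pivot_dataframe_for_heatmap, assuming a dataframe shaped
# like the output of make_grouped_sim_data (MultiIndex columns); the axis labels
# are illustrative only.
def _example_heatmap_pivot(grouped_sim_data):
    " Illustrative only. "
    return pivot_dataframe_for_heatmap(grouped_sim_data,
                                       x=('Scaling factors', 'GNav18'),
                                       y=('Amp', 'step'),
                                       z=('APFullWidth', 'step'),
                                       labels=['GNav18 scaling', 'Amp (pA)', 'AP full width'])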
def pom_consistency_in_region(pop, region, firing_pattern, stim_type, amp_stim_relationship):
"""
Plot or find mean and std dev of the distribution of firing pattern consistency in the population,
within a defined region. See brown notebook 1 for a sketch of histogram and cumulative consistency plots.
"""
" Get consistency for each model "
model_consistencies = pd.DataFrame(index=pop.results.index, columns=['Consistency'])
for model in pop.results.index:
model_consistency = model_consistency_in_region(model, pop, region, firing_pattern, stim_type, amp_stim_relationship)
model_consistencies.at[model, 'Consistency'] = model_consistency
pom_consistency = model_consistencies
return pom_consistency
def model_consistency_in_region(model, pop, region, firing_pattern, stim_type, amp_stim_relationship):
"""
Calculate consistency percentage for one model for the given firing pattern, population and simulation region
"""
num_simulations = 0 # Count total number of simulations or could calc directly from region and check we don't get any errors with
# any simulation in the region
num_consistent_simulations = 0
for simulation in region:
num_simulations += 1
sim_full_name = short_name_to_full(simulation, stim_type, amp_stim_relationship, delimiter='_', amp_units='pA')
simulation_firing_pattern = pop.results.at[model, (sim_full_name, 'Firing pattern')]
# Check if firing pattern is part of simulation firing pattern
# Check for nans
if type(simulation_firing_pattern) != list:
if np.isnan(simulation_firing_pattern):
continue
if firing_pattern in simulation_firing_pattern:
num_consistent_simulations += 1
model_consistency = 100.*(num_consistent_simulations/num_simulations)
return model_consistency
def assign_subpopulation_from_region(pop, region, criteria, verbose=False):
"""
Compute required consistencies and assign subpopulations to a population of models based on results from
a simulation region.
Inputs:
pop - a PopulationOfModels class
region - a list of simulations
criteria - has the format: {name:[firing pattern, opcode, value]}
E.g. {'single < 25':['single', '>=', 25]}
"""
import operator
opcodes = {'>':operator.gt, '>=':operator.ge, '<':operator.lt, '<=':operator.le}
# Build consistencies for each criteria
consistencies = pd.DataFrame(index=pop.results.index)
for criterion in criteria:
firing_pattern = criteria[criterion][0]
pom_consistency = pom_consistency_in_region(pop,
region,
firing_pattern=firing_pattern,
stim_type='step',
amp_stim_relationship={'step':1,'ramp':10})
consistencies[firing_pattern] = pom_consistency
# Find the models that fulfill all consistency criteria
models_passing_criteria = pd.DataFrame(index=consistencies.index)
for criterion, criterion_params in criteria.items():
firing_pattern = criterion_params[0]
opcode = criterion_params[1]
val = criterion_params[2]
op = opcodes[opcode]
consistency = consistencies[firing_pattern]
models_passing_criteria[criterion] = op(consistency,val)
models_passing_all_criteria = models_passing_criteria.all(axis=1)
# Filter away models that don't pass all criteria
subpopulation = pd.DataFrame(index=pop.results.index)
subpopulation = subpopulation[models_passing_all_criteria]
if verbose:
print('{} models out of {} in population of models are in the subpopulation'.format(
len(subpopulation.index), len(pop.results.index))
)
return subpopulation
def calculate_percentage_of_firing_patterns(firing_patterns):
"""
Calculate percentage of each firing pattern.
firing_patterns is a series or list of lists of firing patterns
"""
assert type(firing_patterns) == pd.Series, "firing_patterns is not a pandas Series"
firing_patterns = firing_patterns.dropna() # Clean up data
firing_patterns_in_this_sim = []
for model_fps in firing_patterns: # Iterate over models
for fp in model_fps: # Iterate over firing patterns for one model
firing_patterns_in_this_sim.append(fp)
set_of_firing_patterns_in_this_sim = set(firing_patterns_in_this_sim)
# Then iterate through and count each one
counts = {fp:0 for fp in set_of_firing_patterns_in_this_sim}
for model_fps in firing_patterns:
for fp in model_fps:
counts[fp] += 1
# Then divide each by number of models
percentages = {fp:None for fp in set_of_firing_patterns_in_this_sim}
num_models = len(firing_patterns)
for fp, val in counts.items():
percentages[fp] = 100.0*float(val)/num_models
return percentages
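# Hedged usage sketch: the function above expects a pandas Series whose elements
# are lists of firing-pattern labels, one list per model. Labels are illustrative.
def _example_firing_pattern_percentages():
    " Illustrative only. "
    fps = pd.Series([['single'], ['single', 'delayed'], ['repetitive']])
    return calculate_percentage_of_firing_patterns(fps)
    # -> {'single': ~66.7, 'delayed': ~33.3, 'repetitive': ~33.3} (percent of models)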
" Data analysis "
def pom_consistency_stats(pom_consistency):
return {'mean':pom_consistency['Consistency'].mean(), 'std':pom_consistency['Consistency'].std()}
def pom_consistency_hist(pom_consistency):
plt.hist(pom_consistency)
" Plotting "
def make_3d_plot(data, x, y, z, cutoff, fillna_value, labels, angle=(20,300), zticks=None, title=None):
"""
Construct a 3d plot
"""
df = pd.DataFrame(columns=["X","Y","Z"])
df['X'] = data[x].copy()
df['Y'] = data[y].copy()
df['Z'] = data[z].copy()
df = df.fillna(value=fillna_value,axis=1)
df.loc[df['Z'] >= cutoff, 'Z'] = fillna_value * 3 # Change cutoff points to be very low
#df['Z'][df['Z'] >= cutoff] = fillna_value * 3 # old redundant code setting on slice
# Make the plot
fig = plt.figure(figsize=(10,10))
ax = fig.gca(projection='3d')
# Title
if title is not None:
ax.set_title(title, fontsize=18, verticalalignment='baseline')
" Make the plot pop "
surf = ax.plot_trisurf(df['X'], df['Y'], df['Z'], cmap=plt.cm.viridis, linewidth=0.2)
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.xlabel(labels[0])
plt.ylabel(labels[1])
ax.set_zlabel(labels[2], fontsize=20, rotation=40)
if zticks != None:
ax.set_zticks(zticks)
# Rotate it
ax.view_init(angle[0], angle[1])
plt.tight_layout()
plt.show()
return fig, ax
def visualise_region(pop, region, stim_type, scaling_factor='GNav18'):
" Visualise region as scatter plot with one scaling factor and amp "
firing_pattern_percentages = get_firing_pattern_percentages(pop) # Excludes rheobase simulations
firing_pattern_percentages = process_firing_pattern_data(firing_pattern_percentages)
for sim in firing_pattern_percentages.index:
amp = firing_pattern_percentages.loc[sim, ('Amp', stim_type)]
sf_val = firing_pattern_percentages.loc[sim, ('Scaling factors', scaling_factor)]
plt.scatter(sf_val, amp, color='k')
# Now plot region
for sim, sim_vals in region.items():
amp = sim_vals['Amp']
sf_val = sim_vals[scaling_factor]
plt.scatter(sf_val, amp, color='r')
def pom_consistency_cumulative_plot(pom_consistency, bins=10):
" See https://stackoverflow.com/questions/15408371/cumulative-distribution-plots-python for code source"
data = pom_consistency['Consistency']
values, base = np.histogram(data, bins=bins)
cumulative = np.cumsum(values)
plt.plot(base[:-1], cumulative)
def plot_biomarker_boxplots(pop, biomarker, stim_amps, sim_type,
sim_types=['step', 'ramp'],
sim_type_amp_relationship={'step':1., 'ramp':10.},
base_sim_type='step',
save=False,
save_path=None,
fixed_scaling_factors={},
):
" Plot boxplots of biomarkers for constant stim amps varying scaling factor "
sim_names = pop.get_simulation_names()
sim_name_conversions = make_sim_name_conversions(sim_names, sim_types, sim_type_amp_relationship, base_sim_type)
for stim_amp in stim_amps:
simulations = get_simulations_with_fixed_stim_amp(pop, stim_amp, sim_type, sim_type_amp_relationship, base_sim_type, fixed_scaling_factors=fixed_scaling_factors)
# Build dataframe with the simulation and biomarker value of each model
df = pd.DataFrame()
for sim in simulations:
biomarker_data = pd.DataFrame(columns=[biomarker])
# Created on 2020/7/15
# This module is for the class TimeSeries and related functions.
# Standard library imports
from datetime import datetime
from typing import Any, Callable, Optional, Union
import warnings
# Third party imports
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pytz
import scipy.stats as stats
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Ridge
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
from sklearn.gaussian_process import GaussianProcessRegressor, kernels
from statsmodels.api import OLS
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
from typeguard import typechecked
# Local application imports
from .. import exceptions
# Dictionary of Pandas' Offset Aliases
# and their numbers of appearance in a year.
DPOA = {'D': 365, 'B': 252, 'W': 52,
'SM': 24, 'SMS': 24,
'BM': 12, 'BMS': 12, 'M': 12, 'MS': 12,
'BQ': 4, 'BQS': 4, 'Q': 4, 'QS': 4,
'Y': 1, 'A':1}
# Datetimes format
fmt = "%Y-%m-%d %H:%M:%S"
fmtz = "%Y-%m-%d %H:%M:%S %Z%z"
#---------#---------#---------#---------#---------#---------#---------#---------#---------#
@typechecked
def get_list_timezones() -> None:
"""
Lists all the time zone names that can be used.
"""
print(pytz.all_timezones)
return None
# CLASS Series
@typechecked
class Series:
"""
Abstract class defining a Series and its methods.
This class serves as a parent class for TimeSeries and CatTimeSeries.
Attributes
----------
data : pandas.Series or pandas.DataFrame
Contains a time-like index and for each time a single value.
start_utc : Pandas.Timestamp
Starting date.
end_utc : Pandas.Timestamp
Ending date.
nvalues : int
Number of values, i.e. also of dates.
freq : str or None
Frequency inferred from index.
name : str
Name or nickname of the series.
unit : str or None
Unit of the series values.
tz : str
Timezone name.
timezone : pytz timezone
Timezone associated with dates.
"""
def __init__(self,
data: Union[pd.Series, pd.DataFrame, None]=None,
tz: str=None,
unit: str=None,
name: str=None
) -> None:
"""
Receives a panda.Series or pandas.DataFrame as an argument and initializes the time series.
"""
# Deal with DataFrame / Series
if (data is None) or (data.empty is True):
self.data = pd.Series(index=None, data=None)
self.start_utc = None
self.end_utc = None
self.nvalues = 0
self.freq = None
self.name = 'Empty TimeSeries'
else:
# Making sure the user entered a pandas.Series or pandas.DataFrame
# with just an index and one column for values
if isinstance(data, pd.DataFrame):
if data.shape[1] != 1:
raise AssertionError("Time series must be built from a pandas.Series or a pandas.DataFrame with only one value column.")
else:
self.data = pd.Series(data.iloc[:, 0])
elif not isinstance(data, pd.Series):
raise AssertionError("Time series must be built from a pandas.Series or a pandas.DataFrame with only one value column.")
else:
self.data = data
# Deal with time
if isinstance(data.index[0], str):
data.index = pd.to_datetime(data.index, format=fmt)
self.start_utc = datetime.strptime(str(data.index[0]), fmt)
self.end_utc = datetime.strptime(str(data.index[-1]), fmt)
self.nvalues = data.shape[0]
else:
self.start_utc = data.index[0]
self.end_utc = data.index[-1]
self.nvalues = data.shape[0]
try:
self.freq = pd.infer_freq(self.data.index)
except:
self.freq = 'Unknown'
# Deal with unit
self.unit = unit
# Deal with timezone
if tz is None:
self.tz = 'UTC'
self.timezone = pytz.utc
else:
self.tz = tz
self.timezone = pytz.timezone(tz)
# Deal with name (nickname)
if name is None:
name = ""
self.name = name
def get_start_date_local(self) -> str:
"""
Returns the attribute UTC start date in local time zone defined by attribute timezone.
"""
start_tmp = datetime.strptime(str(self.start_utc), fmt).astimezone(self.timezone)
return datetime.strftime(start_tmp, format=fmtz)
def get_end_date_local(self) -> str:
"""
Returns the attribute UTC end date in local time zone defined by attribute timezone.
"""
end_tmp = datetime.strptime(str(self.end_utc), fmt).astimezone(self.timezone)
return datetime.strftime(end_tmp, format=fmtz)
def specify_data(self,
start: Union[str, datetime.date],
end: Union[str, datetime.date]
) -> Union[pd.Series, pd.DataFrame]:
"""
Returns the appropriate data according to user's specifying
or not the desired start and end dates.
"""
# Prepare data
if (start is None) and (end is None):
data = self.data
elif (start is None) and (end is not None):
data = self.data[:end]
elif (start is not None) and (end is None):
data = self.data[start:]
elif (start is not None) and (end is not None):
data = self.data[start:end]
return data
def start_end_names(self,
start: Union[str, datetime.date],
end: Union[str, datetime.date]
) -> (str, str):
"""
Recasts the time series dates to 10 characters strings
if the date hasn't been re-specified (i.e. value is 'None').
"""
s = str(self.start_utc)[:10] if (start is None) else start
e = str(self.end_utc)[:10] if (end is None) else end
return s, e
def is_sampling_uniform(self) -> bool:
"""
Tests if the sampling of a time series is uniform or not.
Returns a boolean value True when the sampling is uniform, False otherwise.
"""
# Prepare data
sampling = [datetime.timestamp(x) for x in self.data.index]
assert(len(sampling)==self.nvalues)
intervals = [sampling[x] - sampling[x-1] for x in range(1,self.nvalues,1)]
# Testing
prev = intervals[0]
for i in range(1,len(intervals),1):
if abs(intervals[i] - prev) > 1.e-6:
return False
return True
#---------#---------#---------#---------#---------#---------#---------#---------#---------#
# CLASS TimeSeries
@typechecked
class TimeSeries(Series):
"""
Class defining a time series and its methods.
This class inherits from the parent class 'Series'.
Attributes
----------
data : pandas.Series or pandas.DataFrame
Contains a time-like index and for each time a single value.
start_utc : Pandas.Timestamp
Starting date.
end_utc : Pandas.Timestamp
Ending date.
nvalues : int
Number of values, i.e. also of dates.
freq : str or None
Frequency inferred from index.
name : str
Name or nickname of the series.
tz : str
Timezone name.
timezone : pytz timezone
Timezone associated with dates.
type : str
Type of the series.
unit : str or None
Unit of the time series values.
"""
def __init__(self,
data: Union[pd.Series, pd.DataFrame, None]=None,
tz: str=None,
unit: str=None,
name: str=None
) -> None:
"""
Receives a pandas.Series or pandas.DataFrame as an argument and initializes the time series.
"""
super().__init__(data=data, tz=tz, unit=unit, name=name)
# Add attributes initialization if needed
self.type = 'TimeSeries'
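# Hedged construction sketch (kept as a comment so nothing runs inside the class body);
# the index frequency, values and names are illustrative only:
#   idx = pd.date_range("2020-01-01", periods=252, freq="B")
#   ts = TimeSeries(pd.Series(index=idx, data=np.random.randn(252)), unit="USD", name="demo")
#   ts.simple_plot()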
### Plot INFORMATION ABOUT THE TIME SERIES ###
def simple_plot(self,
figsize: (float, float) = (12, 5),
dpi: float=100
) -> None:
"""
Plots the time series in a simple way.
Parameters
----------
figsize : 2-tuple of ints
Dimensions of the figure.
dpi : int
Dots-per-inch definition of the figure.
Returns
-------
None
None
"""
# Plot
plt.figure(figsize=figsize, dpi=dpi)
plt.plot(self.data.index, self.data.values, color='k')
# Make it cute
if self.name is None:
title = "Time series from " + str(self.start_utc)[:10] \
+ " to " + str(self.end_utc)[:10]
else:
title = "Time series " + self.name + " from " + str(self.start_utc)[:10] \
+ " to " + str(self.end_utc)[:10]
if self.tz is None:
xlabel = 'Date'
else:
xlabel = 'Date (' + self.tz + ')'
if self.unit is None:
ylabel = 'Value'
else:
ylabel = 'Value (' + self.unit + ')'
plt.gca().set(title=title, xlabel=xlabel, ylabel=ylabel)
plt.show()
return None
@typechecked
def distribution(self,
start: Union[str, datetime.date]=None,
end: Union[str, datetime.date]=None,
bins: int=20,
figsize: (float, float) = (8, 4),
dpi: float=100
) -> None:
"""
Plots the distribution of values between two dates.
"""
# Prepare data
data = self.specify_data(start, end)
# Plot distribution of values
plt.figure(figsize=figsize, dpi=dpi)
data.hist(bins=bins, grid=False, color='w', lw=2, edgecolor='k')
# Make it cute
s,e = self.start_end_names(start, end)
title = "Distribution of values between " + s + " and " + e
plt.gca().set(title=title, xlabel="Value", ylabel="Hits")
plt.show()
return None
def density(self,
start: Union[str, datetime.date]=None,
end: Union[str, datetime.date]=None,
bins: int=20,
figsize: (float, float) = (8, 4),
dpi: float=100
) -> None:
"""
Plots the density of values between two dates.
"""
# Prepare data
data = self.specify_data(start, end)
s,e = self.start_end_names(start, end)
# Plot distribution of values
fig, ax = plt.subplots(figsize=figsize, dpi=dpi)
data.plot.density(color='k', ax=ax, legend=False)
# Make it cute
title = "Density plot of values between " + s + " and " + e
plt.gca().set(title=title, xlabel="Value", ylabel="Density")
plt.show()
return None
def simple_plot_distrib(self,
start: Union[str, datetime.date]=None,
end: Union[str, datetime.date]=None,
bins: int=20,
figsize: (float, float) = (10, 4),
dpi: float=100
) -> None:
"""
Plots the time series and its associated distribution of values between two dates.
"""
# Checks
assert(isinstance(bins,int))
# Prepare data
data = self.specify_data(start, end)
s,e = self.start_end_names(start, end)
# Plot
fig = plt.figure(figsize=figsize, dpi=dpi)
gs = fig.add_gridspec(1, 4)
# Plot 1 - Time Series simple plot
f_ax1 = fig.add_subplot(gs[:, 0:3])
f_ax1.plot(data.index, data.values, color='k')
if self.name is None:
title1 = "Time series from " + s + " to " + e
else:
title1 = "Time series " + self.name + " from " + s + " to " + e
if self.tz is None:
xlabel = 'Date'
else:
xlabel = 'Date (' + self.tz + ')'
if self.unit is None:
ylabel = 'Value'
else:
ylabel = 'Value (' + self.unit + ')'
plt.gca().set(title=title1, xlabel=xlabel, ylabel=ylabel)
# Plot 2 - Distribution of values
f_ax2 = fig.add_subplot(gs[:, 3:])
data.hist(bins=bins, grid=False, ax=f_ax2, orientation="horizontal", color='w', lw=2, edgecolor='k')
plt.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=0.3, hspace=0)
title2 = "Distribution"
plt.gca().set(title=title2, xlabel=ylabel, ylabel="Hits")
plt.show()
return None
def get_sampling_interval(self) -> float:
"""
Returns the sampling interval for a uniformly-sampled time series.
"""
if(self.is_sampling_uniform()==False):
raise exceptions.SamplingError("Time series is not uniformly sampled.")
else:
idx1 = self.data.index[1]
idx0 = self.data.index[0]
intv = datetime.timestamp(idx1) - datetime.timestamp(idx0)
return intv
def lag_plot(self,
lag: int=1,
figsize: (float, float) = (5, 5),
dpi: float=100,
alpha: float=0.5
) -> None:
"""
Returns the scatter plot x_t v.s. x_{t-l}.
"""
# Check
try:
assert(lag>0)
except AssertionError:
raise AssertionError("The lag must be an integer equal or more than 1.")
# Do the plot
fig = plt.figure(figsize=figsize, dpi=dpi)
pd.plotting.lag_plot(self.data, lag=lag, c='black', alpha=alpha)
# Set title
if self.name is None:
tmp_name = " "
else:
tmp_name = self.name
title = "Lag plot of time series " + tmp_name
plt.gca().set(title=title, xlabel="x(t)", ylabel="x(t+"+str(lag)+")")
plt.show()
return None
def lag_plots(self,
nlags: int=5,
figsize: (float, float) = (10, 10),
dpi: float=100,
alpha: float=0.5
) -> None:
"""
Returns a number of scatter plots x_t v.s. x_{t-l}
where l is the lag value taken from [0,...,nlags].
Notes
-----
It is required that nlags > 1.
"""
# Check
try:
assert(nlags>1)
except AssertionError:
raise AssertionError("nlags must be an integer starting from 2.")
# Rule for the number of rows/cols
ncols = int(np.sqrt(nlags))
if(nlags % ncols == 0):
nrows = nlags // ncols
else:
nrows = nlags // ncols + 1
# Do the plots
fig, axes = plt.subplots(nrows=nrows, ncols=ncols, sharex=True, sharey=True,
figsize=figsize, dpi=dpi)
for i, ax in enumerate(axes.flatten()[:nlags]):
pd.plotting.lag_plot(self.data, lag=i+1, ax=ax, c='black', alpha=alpha)
ax.set_xlabel("x(t)")
ax.set_ylabel("x(t+"+str(i+1)+")")
# Set title
if self.name is None:
tmp_name = " "
else:
tmp_name = self.name
title = "Multiple lag plots of time series " + tmp_name
fig.suptitle(title)
plt.show()
return None
### SIMPLE DATA EXTRACTION ON THE TIME SERIES ###
def hist_avg(self, start: Union[str, datetime.date]=None, end: Union[str, datetime.date]=None) -> float:
"""
Returns the historical average of the time series
between two dates (default is the whole series).
"""
data = self.specify_data(start, end)
avg = data.values.mean()
return avg
def hist_std(self, start: Union[str, datetime.date]=None, end: Union[str, datetime.date]=None) -> float:
"""
Returns the historical standard deviation of the time series
between two dates (default is the whole series).
"""
data = self.specify_data(start, end)
std = data.values.std()
return std
def hist_variance(self, start: Union[str, datetime.date]=None, end: Union[str, datetime.date]=None) -> float:
"""
Returns the historical variance of the time series
between two dates (default is the whole series).
"""
data = self.specify_data(start, end)
var = data.values.var()
return var
def hist_skewness(self, start: Union[str, datetime.date]=None, end: Union[str, datetime.date]=None) -> float:
"""
Returns the historical skew of the time series
between two dates (default is the whole series).
"""
data = self.specify_data(start, end)
skew = stats.skew(data.values)
return skew
def hist_kurtosis(self, start: Union[str, datetime.date]=None, end: Union[str, datetime.date]=None) -> float:
"""
Returns the historical (Fisher) kurtosis of the time series
between two dates (default is the whole series).
"""
data = self.specify_data(start, end)
kurt = stats.kurtosis(data.values, fisher=False)
return kurt
def min(self, start: Union[str, datetime.date]=None, end: Union[str, datetime.date]=None) -> float:
"""
Returns the minimum of the series.
"""
data = self.specify_data(start, end)
ts_min = data.values.min()
return ts_min
def max(self, start: Union[str, datetime.date]=None, end: Union[str, datetime.date]=None) -> float:
"""
Returns the maximum of the series.
"""
data = self.specify_data(start, end)
ts_max = data.values.max()
return ts_max
def describe(self, start: Union[str, datetime.date]=None, end: Union[str, datetime.date]=None) -> None:
"""
Returns description of time series between two dates.
This uses the pandas function having same name.
"""
data = self.specify_data(start, end)
print(data.describe())
return None
### METHODS THAT ARE CLOSER TO FINANCIAL APPLICATIONS ###
def percent_change(self,
start: Union[str, datetime.date]=None,
end: Union[str, datetime.date]=None,
name: str=""
) -> 'TimeSeries':
"""
Returns the percent change of the series (in %).
Notes
-----
When computing the percent change, first date gets
NaN value and is thus removed from the time series.
"""
data = self.specify_data(start, end)
new_data = data.pct_change()
new_ts = TimeSeries(data=new_data[1:], tz=self.tz, unit='%', name=name)
return new_ts
# Alias method of percent_change()
# For people with a Finance terminology preference
net_returns = percent_change
def gross_returns(self,
start: Union[str, datetime.date]=None,
end: Union[str, datetime.date]=None,
name: str=""
) -> 'TimeSeries':
"""
Returns the gross returns of the series (in %),
i.e. percent change + 1.
Notes
-----
When computing the percent change, first date gets
NaN value and is thus removed from the time series.
"""
data = self.specify_data(start, end)
new_data = 1 + data.pct_change()
new_ts = TimeSeries(new_data[1:], tz=self.tz, name=name)
return new_ts
def hist_vol(self,
start: Union[str, datetime.date]=None,
end: Union[str, datetime.date]=None,
verbose: bool=True
) -> float:
"""
Computes the net returns of the time series and
returns their associated historical volatility
between two dates (default is the whole series).
Notes
-----
When computing the percent change, first date gets
NaN value and is thus removed from calculation.
pandas.Series.pct_change() already returns fractional changes,
so no further rescaling is applied before taking the standard deviation.
"""
# Initialization
data = self.specify_data(start, end)
# Warning message
if (self.is_sampling_uniform() is not True) and (verbose is True):
warnings.warn("Index not uniformly sampled. Result could be meaningless.")
# Warning message
if (0. in data.values) and (verbose is True):
warnings.warn("Zero value in time series, will generate infinite return.")
# Computing net returns
net_returns = data.pct_change()[1:]
# Compute standard deviation, i.e. volatility
std = net_returns.values.std()
return std
def annualized_vol(self,
start: Union[str, datetime.date]=None,
end: Union[str, datetime.date]=None,
verbose: bool=True
) -> float:
"""
Returns the annualized volatility of the time series
between two dates (default is the whole series),
using the frequency of the time series when usable.
"""
# Initializations
hvol = self.hist_vol(start, end, verbose=verbose)
if (self.freq is not None) and (self.freq in DPOA.keys()):
return hvol * np.sqrt(DPOA[self.freq])
else:
raise ValueError('Annualized volatility could not be evaluated.')
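# Hedged worked example of the annualization above: for a business-day series
# (freq 'B', so DPOA['B'] == 252) with a per-period volatility of 1%, the
# annualized volatility is 0.01 * sqrt(252), roughly 0.159 (about 15.9%).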
def annualized_return(self,
start: Union[str, datetime.date]=None,
end: Union[str, datetime.date]=None,
verbose: bool=True
) -> float:
"""
Returns the annualized return of the time series
between two dates (default is the whole series),
using the frequency of the time series when usable.
Arguments
---------
start : str or datetime
Starting date of selection.
end : str or datetime
Ending date of selection.
verbose : bool
Verbose option.
Returns
-------
float
Annualized return.
"""
# Initializations
gross_returns = self.gross_returns(start, end)
# Compute product of values
prd = gross_returns.data.prod()
# Checks
if (start is None) and (end is None):
assert(gross_returns.nvalues == self.nvalues-1)
if (gross_returns.freq != self.freq) and (verbose is True):
warning_message = "Gross_returns frequency and time series frequency do not match." \
+ " In that context, results may be meaningless."
warnings.warn(warning_message)
if (self.freq is not None) and (self.freq in DPOA.keys()):
return prd**(DPOA[self.freq]/gross_returns.nvalues) - 1
else:
raise ValueError('Annualized return could not be evaluated.')
def risk_ratio(self,
start: Union[str, datetime.date]=None,
end: Union[str, datetime.date]=None,
verbose: bool=True
) -> float:
"""
Returns the risk ratio, i.e. the ratio of annualized return
over annualized volatility.
"""
ann_return = self.annualized_return(start, end)
ann_volatility = self.annualized_vol(start, end, verbose=verbose)
return ann_return / ann_volatility
def annualized_Sharpe_ratio(self,
risk_free_rate: float=0,
start: Union[str, datetime.date]=None,
end: Union[str, datetime.date]=None,
verbose: bool=True
) -> float:
"""
Returns the Sharpe ratio, also known as risk adjusted return.
"""
ann_return = self.annualized_return(start, end)
ann_volatility = self.annualized_vol(start, end, verbose=verbose)
return (ann_return - risk_free_rate) / ann_volatility
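# Hedged worked example with illustrative numbers: an annualized return of 10%,
# a risk-free rate of 2% and an annualized volatility of 16% give a Sharpe ratio
# of (0.10 - 0.02) / 0.16 = 0.5.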
### METHODS RELATED TO VALUE AT RISK ###
def hist_var(self,
p: float,
start: Union[str, datetime.date]=None,
end: Union[str, datetime.date]=None
) -> float:
"""
Returns the historical p-VaR (Value at Risk) between two dates.
Returns
-------
float
VaR value computed between the chosen dates.
"""
# Checks
assert(p>=0 and p<=1)
if 100 * p % 1 != 0:
warning_message = f"Probability too precise, only closest percentile computed here." \
+ f"Hence for p = {str(p)} , percentile estimation is based on p = {str(int(100 * p))} %."
warnings.warn(warning_message)
# Prepare data
data = self.specify_data(start, end)
return np.percentile(data.values, int(100*p))
def hist_cvar(self,
p: float,
start: Union[str, datetime.date]=None,
end: Union[str, datetime.date]=None
) -> float:
"""
Returns the historical CVaR (Conditional Value at Risk) between two dates.
This quantity is also known as the Expected Shortfall (ES).
Returns
-------
float
CVaR value computed between the chosen dates.
"""
# Checks
assert(p>=0 and p<=1)
if 100*p%1 != 0:
warning_message = "Probability too precise, only closest percentile computed here." \
+ "Hence for p = " + str(p) + " , percentile estimation is based on p = " + str(int(100*p)) + " %."
warnings.warn(warning_message)
# Prepare data
data = self.specify_data(start, end)
var = self.hist_var(p=p, start=start, end=end)
# Computing CVaR
tmp_sum = 0
tmp_n = 0
for val in data.values:
if val <= var:
tmp_sum += val
tmp_n += 1
return tmp_sum / tmp_n
# Alias method of hist_cvar
# For people with a Finance terminology preference
hist_expected_shortfall = hist_cvar
def cornish_fisher_var(self,
p: float,
start: Union[str, datetime.date]=None,
end: Union[str, datetime.date]=None
) -> float:
"""
Returns the VaR (Value at Risk) between two dates from
the Cornish-Fisher expansion.
Returns
-------
float
VaR value computed between the chosen dates.
"""
# Checks
assert(p>=0 and p<=1)
# Prepare data
data = self.specify_data(start, end)
# Compute z-score based on normal distribution
z = stats.norm.ppf(p)
# Compute modified z-score from expansion
s = stats.skew(data.values)
k = stats.kurtosis(data.values, fisher=False)
new_z = z + (z**2 - 1) * s/6 + (z**3 - 3*z) * (k-3)/24 \
- (2*z**3 - 5*z) * (s**2)/36
return data.values.mean() + new_z * data.values.std(ddof=0)
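# Note on the expansion implemented above (Cornish-Fisher modified quantile),
# restated from the code itself:
#   z_cf = z + (z^2 - 1) s / 6 + (z^3 - 3 z)(k - 3) / 24 - (2 z^3 - 5 z) s^2 / 36
# and the returned VaR is mean + z_cf * std (population std, ddof=0).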
### AUTOCORRELATION COMPUTATION ###
def autocorrelation(self,
lag: int=1,
start: Union[str, datetime.date]=None,
end: Union[str, datetime.date]=None
) -> float:
"""
Returns the autocorrelation of the time series for a specified lag.
We use the function:
$rho_l = frac{Cov(x_t, x_{t-l})}{\sqrt(Var[x_t] Var[x_{t-l}])}
where $x_t$ is the time series at time t.
Cov denotes the covariance and Var the variance.
We also use the properties $rho_0 = 1$ and $rho_{-l} = rho_l$
(using LaTeX notations here).
"""
# Initialization
l = abs(lag)
# Trivial case
if l==0:
return 1
# Prepare data
data = self.specify_data(start, end)
# General case
assert(l < data.shape[0])
shifted_data = data.shift(l)
mu = data.mean()
sigma = data.std()
numerator = np.mean((data - mu) * (shifted_data - mu))
denominator = sigma**2
return numerator / denominator
def plot_autocorrelation(self,
lag_min: int=0,
lag_max: int=25,
start: Union[str, datetime.date]=None,
end: Union[str, datetime.date]=None,
figsize: (float, float) = (8, 4),
dpi: float=100
) -> None:
"""
Uses autocorrelation method in order to return a plot
of the autocorrelation againts the lag values.
"""
# Checks
assert(lag_max > lag_min)
# Computing autocorrelation
x_range = list(range(lag_min, lag_max+1, 1))
ac = [self.autocorrelation(lag=x, start=start, end=end) for x in x_range]
# Plot
plt.figure(figsize=figsize, dpi=dpi)
plt.bar(x_range, ac, color='w', lw=2, edgecolor='k')
s,e = self.start_end_names(start, end)
title = "Autocorrelation from " + s + " to " + e + " for lags = [" \
+ str(lag_min) + "," + str(lag_max) + "]"
plt.gca().set(title=title, xlabel="Lag", ylabel="Autocorrelation Value")
plt.show()
return None
def acf_pacf(self,
lag_max: int=25,
figsize: (float, float) = (12, 3),
dpi: float=100
) -> None:
"""
Returns a plot of the AutoCorrelation Function (ACF)
and Partial AutoCorrelation Function (PACF) from statsmodels.
"""
# Plot
fig, axes = plt.subplots(1,2, figsize=figsize, dpi=dpi)
plot_acf(self.data.values.tolist(), lags=lag_max, ax=axes[0])
plot_pacf(self.data.values.tolist(), lags=lag_max, ax=axes[1])
plt.show()
return None
### SIMPLE TRANSFORMATIONS OF THE TIME SERIES TO CREATE A NEW TIME SERIES ###
def trim(self,
new_start: Union[str, datetime.date],
new_end: Union[str, datetime.date],
name: str=None
) -> 'TimeSeries':
"""
Method that trims the time series to the desired dates
and send back a new time series.
"""
new_data = self.data[new_start:new_end]
if name is None:
name = self.name
new_ts = TimeSeries(data=new_data, tz=self.tz, unit=self.unit, name=name)
return new_ts
def add_cst(self,
cst: float=0,
name: str=None
) -> 'TimeSeries':
"""
Method that adds a constant to the time series.
"""
new_data = self.data + cst
if name is None:
name = self.name
new_ts = TimeSeries(data=new_data, tz=self.tz, unit=self.unit, name=name)
return new_ts
def mult_by_cst(self,
cst: float=1,
name: str=None
) -> 'TimeSeries':
"""
Method that multiplies the time series by a constant.
"""
new_data = self.data * cst
if name is None:
name = self.name
new_ts = TimeSeries(data=new_data, tz=self.tz, unit=self.unit, name=name)
return new_ts
def linear_combination(self,
other_ts: 'TimeSeries',
factor1: float=1,
factor2: float=1,
name: str=None
) -> 'TimeSeries':
"""
Method that adds a time series to the current one
according to linear combination:
factor1 * current_ts + factor2 * other_ts.
"""
# Checks
if (self.unit != other_ts.unit):
raise AssertionError("Time series to combine must have same unit.")
# Compute linear combination
new_data = factor1 * np.array(self.data.values) + factor2 * np.array(other_ts.data.values)
new_data = pd.Series(index=self.data.index, data=new_data)
new_ts = TimeSeries(data=new_data, tz=self.tz, unit=self.unit, name=name)
return new_ts
def convolve(self,
func: Callable[[float], float],
x_min: float,
x_max: float,
n_points: int,
normalize: bool=False,
name: str=None
) -> 'TimeSeries':
"""
Performs a convolution of the time series with a function 'func'.
The 'normalize' option allows to renormalize 'func' such that
the sum of its values is one.
Parameters
----------
func : function
Function we want to employ for convolution.
x_min : float
Minimum value to consider for 'func'.
x_max : float
Maximum value to consider for 'func'.
n_points : int
Number of points to consider in the function.
normalize: bool
Option to impose the sum of func values to be 1.
name : str
New name.
Returns
-------
TimeSeries
Convolved time series.
"""
# Getting the time series values
ts_vals = self.data.values
# Getting the convolving function values
X = np.linspace(x_min, x_max, n_points)
func_vals = []
for x in X:
func_vals.append(func(x))
if normalize is True:
sum_vals = np.array(func_vals).sum()
func_vals = np.array(func_vals) / sum_vals
# Dealing with name
if name is None:
name = self.name + str('-Convolved')
# Generate convolved values
convolved_vals = np.convolve(func_vals, ts_vals.flatten(), mode='same')
convolved_ts = TimeSeries(data=pd.Series(index=self.data.index, data=convolved_vals), tz=self.tz, name=name)
return convolved_ts
import simpy
import datetime
import shapely
import shapely.geometry
import numpy as np
import pandas as pd
import openclsim.core as core
from scipy import interpolate
class GrainSize:
"""
Add information on the grainsize to the object
"""
def __init__(self, grain_size, *args, **kwargs):
super().__init__(*args, **kwargs)
"""Initialization"""
self.grain_size = grain_size
class Pipeline:
"""
Add information on the pipeline length to the object
"""
def __init__(self, pipeline_lengths, volumes, nourishment_volume, *args, **kwargs):
super().__init__(*args, **kwargs)
"""Initialization"""
pipeline_lengths.insert(0, pipeline_lengths[0])
volumes.insert(0, 0)
pipeline_lengths.append(pipeline_lengths[-1])
volumes.append(volumes[-1] + 100000)
# The total nourishment volume
self.nourishment_volume = nourishment_volume
# The interpolation function
self.interpolate_function = interpolate.interp1d(volumes, pipeline_lengths, kind = "previous")
def pipeline_length(self, volume):
"""
Determine the length of the pipeline based on the sinker location and nourished volumes.
"""
# Pipeline length at the given cumulative nourished volume
return self.interpolate_function(volume) * 1
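# Hedged usage sketch for Pipeline.pipeline_length. The volumes and lengths below
# are illustrative, not project data; Pipeline is written as a mixin, but direct
# instantiation works here because its __init__ only forwards leftover arguments.
def _example_pipeline_length():
    " Illustrative only. "
    pipe = Pipeline(pipeline_lengths=[500, 1500, 3000],
                    volumes=[100_000, 250_000, 400_000],
                    nourishment_volume=400_000)
    # 'previous' interpolation: any volume below 250_000 still maps to the first length.
    return pipe.pipeline_length(200_000)   # -> 500.0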
class TripCounter(core.HasContainer):
"""
Create an event that increases the number of trips and calls event.succeed() once a number of trips is achieved.
"""
def __init__(self, activity_id, *args, **kwargs):
super().__init__(*args, **kwargs)
"""Initialization"""
self.activity_id = activity_id
class LoadingFunction:
"""
Create a loading function and add it a processor.
This is a generic and easy to read function, you can create your own LoadingFunction class and add this as a mixin.
loading_rate: the rate at which units are loaded per second
manoeuvring: the time it takes to manoeuvring in minutes
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
"""Initialization"""
def loading(self, origin, destination, amount, add_trip = True):
"""
Determine the duration of the loading event.
The duration is based on the dredged amount and the production rate of the dredging area.
"""
self.grain_size = origin.grain_size
self.origin = origin
counter = 1
for msg in self.log["Message"]:
if "unloading start" in msg:
counter += 1
technical_delay_dredging = 0
if add_trip:
self.trips[self.ActivityID].container.put(1)
if origin.container.level - amount < self.min_filling / 100 * self.container.capacity:
for _ in range(self.trips[self.ActivityID].container.capacity - self.trips[self.ActivityID].container.level):
self.trips[self.ActivityID].container.put(1)
if self.delay:
if counter == self.delay["Trip"] and "Dredging" == self.delay["When"]:
technical_delay_dredging = self.delay["Duration"]
self.log_entry(
"scheduled delay start",
self.env.now,
self.container.level,
self.geometry,
self.ActivityID,
)
self.log_entry(
"scheduled delay stop",
self.env.now + technical_delay_dredging,
self.container.level,
self.geometry,
self.ActivityID,
)
if origin.name == "Area 212":
return amount / (22_507 / 104) * 60 + technical_delay_dredging
elif origin.name == "Area 228":
return amount / (22_507 / 133) * 60 + technical_delay_dredging
elif origin.name == "Area 494":
return amount / (22_507 / 104) * 60 + technical_delay_dredging
elif origin.name == "Area 511":
return amount / (22_507 / 133) * 60 + technical_delay_dredging
class UnloadingFunction:
"""
Create an unloading function and add it a processor.
This is a generic and easy to read function, you can create your own LoadingFunction class and add this as a mixin.
unloading_rate: the rate at which units are loaded per second
manoeuvring: the time it takes to manoeuvring in minutes
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
"""Initialization"""
def unloading(self, origin, destination, amount, add_trip = True):
"""
Determine the duration of the unloading event.
The duration is based on the pumped volume, pipeline length and grain size.
"""
counter = 1
for msg in self.log["Message"]:
if "unloading start" in msg:
counter += 1
technical_delay_pumping = 0
if add_trip:
if not hasattr(self, "previous_destination"):
self.previous_destination = destination
else:
if destination != self.previous_destination:
additional_amount = self.previous_destination.container.level - self.previous_destination.nourishment_volume
if 0 < additional_amount:
self.previous_destination.container.reserve_get(additional_amount)
self.previous_destination.container.get(additional_amount)
destination.container.reserve_put(additional_amount)
destination.container.put(additional_amount)
self.previous_destination = destination
if destination.nourishment_volume < destination.container.level + amount:
for _ in range(self.trips[self.ActivityID].container.capacity - self.trips[self.ActivityID].container.level):
self.trips[self.ActivityID].container.put(1)
if (destination.container.capacity - destination.container.level - 5) < self.container.level:
destination.container.get(5)
if self.delay:
if counter == self.delay["Trip"] and "Pumping" == self.delay["When"]:
technical_delay_pumping = self.delay["Duration"]
self.log_entry(
"scheduled delay start",
self.env.now,
self.container.level,
self.geometry,
self.ActivityID,
)
self.log_entry(
"scheduled delay stop",
self.env.now + technical_delay_pumping,
self.container.level,
self.geometry,
self.ActivityID,
)
grain_size = origin.grain_size
various_pipeline_lengths = [
0,
500,
1000,
1500,
2000,
2500,
3000,
3500,
4000,
4500,
5000,
]
# Area 212 and Area 494
# Average grain size = 373 um
if grain_size == 373:
various_grainsizes = [
214,
209,
204,
200,
194,
176,
157,
132,
111,
80,
59,
]
# Area 228
# Average grain size = 710 um
elif grain_size == 710:
various_grainsizes = [
214,
209,
195,
145,
109,
70,
53,
40,
31,
26,
20,
]
# Area 511
# Average grain size = 533 um
elif grain_size == 553:
various_grainsizes = [
214,
209,
204,
186,
144,
114,
85,
61,
47,
38,
29,
]
interpolate_function = interpolate.interp1d(various_pipeline_lengths, various_grainsizes)
# Pumping duration
pumping_duration = 0
destination_volume = destination.container.level
# Calculate per 1_000 m3
while destination_volume < destination.container.level + amount:
pipeline_length = destination.pipeline_length(destination_volume) * 1
pumping_duration += 1000 / interpolate_function(pipeline_length)
destination_volume += 1000
# (Dis)connecting duration
dis_connecting = 20
# Return seconds
return (pumping_duration + dis_connecting) * 60 + technical_delay_pumping
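# Hedged worked example of the loop above: at 373 um grain size and a fixed
# 1000 m pipeline the interpolated production is 204 (the table appears to be in
# m3 per minute), so every 1000 m3 adds 1000 / 204, roughly 4.9 minutes; the 20
# minutes of (dis)connecting are added and the total is returned in seconds.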
class HasDepthRestriction:
"""HasDepthRestriction class
Used to add depth limits to vessels
draught: should be a lambda function with input variable container.volume
waves: list with wave_heights
ukc: list with ukc, corresponding to wave_heights
filling: filling degree [%]
min_filling: minimal filling degree [%]
max_filling: max filling degree [%]
"""
def __init__(
self,
compute_draught,
ukc,
waves=None,
filling=None,
min_filling=None,
max_filling=None,
step_size=1,
*args,
**kwargs
):
super().__init__(*args, **kwargs)
"""Initialization"""
# Information required to determine whether vessel can access an area
self.compute_draught = compute_draught
self.ukc = ukc
self.waves = waves
# Information require to self-select filling degree
if min_filling is not None and max_filling is not None:
assert min_filling <= max_filling
self.filling = int(filling) if filling is not None else None
self.min_filling = int(min_filling) if min_filling is not None else int(0)
self.max_filling = int(max_filling) if max_filling is not None else int(100)
self.step_size = step_size
if not self.filling:
self.filling_degrees = np.linspace(
self.min_filling,
self.max_filling,
int((self.max_filling - self.min_filling) / self.step_size) + 1,
dtype=int,
)
else:
self.filling_degrees = [self.filling]
        if not (self.max_filling - self.min_filling) % self.step_size == 0:
            raise ValueError("The difference between the minimum filling degree and maximum filling degree is not a multiple of the step size.")
self.depth_data = {}
def calc_water_depth(self, location, new_fill_degree = False):
"""
Calculate the required waterdepth at the location for all times in the series
"""
if not new_fill_degree:
self.depth_data[location.name] = {}
for i in self.filling_degrees:
filling_degree = i / 100
# Determine characteristics based on filling
draught = self.compute_draught(filling_degree)
# Make dataframe based on characteristics
df = location.metocean_data.copy()
df["Required depth"] = df[location.waveheight].apply(
lambda s: self.calc_required_depth(draught, s)
)
self.depth_data[location.name][i] = df
else:
# Determine characteristics based on filling
draught = self.compute_draught(new_fill_degree / 100)
# Make dataframe based on characteristics
df = location.metocean_data.copy()
df["Required depth"] = df[location.waveheight].apply(
lambda s: self.calc_required_depth(draught, s)
)
self.depth_data[location.name][new_fill_degree] = df
def check_optimal_filling(self, loader, unloader, origin, destination):
"""
Check per varying filling degree:
- waiting time
- time back on site
- volume transported
"""
self.trip_selector = {
"Filling Degree": [],
"Volume": [],
"Back on site": [],
"Waiting": [],
"Cycle production": [],
"Trip production": [],
}
if destination.name not in self.depth_data.keys():
self.calc_water_depth(destination)
counter = 1
for msg in self.log["Message"]:
if "unloading start" in msg:
counter += 1
pumping_delay_duration = 0
dredging_delay_duration = 0
if self.delay:
if counter == self.delay["Trip"]:
if self.delay["When"] == "Dredging":
dredging_delay_duration = self.delay["Duration"]
elif self.delay["When"] == "Pumping":
pumping_delay_duration = self.delay["Duration"]
for i in self.filling_degrees:
# Get date between now and 1 week ahead
t_now = datetime.datetime.fromtimestamp(self.env.now - 3600)
t_end = datetime.datetime.fromtimestamp(self.env.now + 7 * 24 * 3600)
# Make dataframe based on characteristics
df = self.depth_data[destination.name][i].copy()
df = df[(df.index >= t_now) & (df.index <= t_end)]
# Sailing distance
distance = self.get_distance(origin.geometry, destination, verbose = False)[0]
# Dredged amount
amount = i / 100 * self.container.capacity
# Determine the duration of each step
duration_sailing_e = distance / self.compute_v(0)
duration_sailing_f = distance / self.compute_v(i / 100)
duration_dredging = self.loading(origin, destination, amount, add_trip = False)
duration_pumping = self.unloading(self, destination, amount, add_trip = False)
# Required window is the length of the pumping duration
window_duration = duration_pumping
# The arrival time
arrival = self.env.now + duration_sailing_e + duration_dredging + duration_sailing_f + pumping_delay_duration + dredging_delay_duration
# The cycle finish time
finish = duration_sailing_e + duration_dredging + duration_sailing_f + duration_pumping + pumping_delay_duration + dredging_delay_duration
# Check when the required depth <= water level
series = pd.Series(df["Required depth"] <= df[destination.waterdepth])
# Determine waiting time
waiting = self.determine_waiting_time(series, arrival, window_duration, self.compute_draught(i / 100))
# Back on site
back_on_site = finish + waiting
# Save trip information
self.trip_selector["Filling Degree"].append(int(i))
self.trip_selector["Volume"].append(amount)
self.trip_selector["Back on site"].append(back_on_site)
self.trip_selector["Waiting"].append(waiting)
self.trip_selector["Cycle production"].append(amount / finish)
self.trip_selector["Trip production"].append(amount / back_on_site)
df = | pd.DataFrame.from_dict(self.trip_selector) | pandas.DataFrame.from_dict |
import numpy as np
import pandas as pd
import xgboost as xgb
from sklearn.impute import SimpleImputer
from sklearn.model_selection import StratifiedShuffleSplit
from scipy.sparse import csr_matrix
from sklearn.metrics import log_loss
# Input data files are available in the "../input/" directory.
# Any results you write to the current directory are saved as output.
def clean_encode_data(train, test, attribs_todrop=None):
    float_imputer = SimpleImputer(missing_values=np.nan, strategy='mean')
if attribs_todrop is not None:
print('Dropping attributes with no influence on the target variable:\n')
print('%s\n' % attribs_todrop)
# dropping unimportant attributes from data sets
train.drop(attribs_todrop, axis=1, inplace=True)
test.drop(attribs_todrop, axis=1, inplace=True)
# for nominal categorical variables:
# create a pair of empty pandas DFs to keep dummies values
train_dummies = pd.DataFrame(index=train.index)
test_dummies = pd.DataFrame(index=test.index)
print('Clean data...\n')
print('Encoding the nominal (ordinal) categorical variables...\n')
features = train.columns
for col in features:
# encoding nominal categorical variables
# OneHotEncoding
if train[col].dtype == 'O':
print('Encoding the nominal categorical variable \'%s\'.' % col)
train_col_dummies = pd.get_dummies(train[col], prefix=col, prefix_sep='.', dummy_na=True)
train_dummies = train_dummies.join(train_col_dummies, how='left')
train.drop(col, axis=1, inplace=True)
test_col_dummies = pd.get_dummies(test[col], prefix=col, prefix_sep='.', dummy_na=True)
test_dummies = test_dummies.join(test_col_dummies, how='left')
test.drop(col, axis=1, inplace=True)
print('\n')
else:
# encoding ordinal categorical variables
# pandas.factorize
if train[col].dtype == 'int64':
print('Encode the ordinal categorical variable \'%s\'.' % col)
train[col], tmp_indexer = pd.factorize(train[col], na_sentinel=0)
test[col] = tmp_indexer.get_indexer(test[col])
print('\n')
# imputing float means
# sklearn-Imputer
if train[col].dtype == 'float64':
print('Imputing the \'na\' values of the float variable \'%s\' with its mean.' % col)
train_col = np.array(train[[col]])
train[col] = float_imputer.fit_transform(train_col)
print('Imputing the \'na\' values of the float variable \'%s\' with its mean.' % col)
test_col = np.array(test[[col]])
test[col] = float_imputer.transform(test_col)
print('\n')
features = train_dummies.columns.join(test_dummies.columns, how='inner')
train = pd.concat([train, train_dummies[features]], axis=1, copy=True)
test = pd.concat([test, test_dummies[features]], axis=1, copy=True)
print('The categorical variables of the data sets have been encoded successfully!\n')
print('The \'na\' values of the float variables have been imputed (mean) successfully!\n')
return train, test
def remove_collinear_predictors(train, test, attribs_of_interest, threshold=0.9):
# REMOVE COLLINEAR PREDICTORS
"""
:type train: pandas DataFrame
:type test: pandas DataFrame
"""
print('Determining the collinear float attributes...\n')
print('Collinearity Threshold: %.2f\n' % threshold)
# select the train/test subset of dtype floats
train_float = train[attribs_of_interest].copy()
test_float = test[attribs_of_interest].copy()
# compute the pair-wise correlations for the attributes in data sets
train_corrs = train_float.corr()
test_corrs = test_float.corr()
# determine the high correlated for the given threshold
train_high_corrs = train_corrs[(abs(train_corrs) > threshold) & (train_corrs != 1)]
test_high_corrs = test_corrs[(abs(test_corrs) > threshold) & (test_corrs != 1)]
# verify that for the chosen collinearity threshold
# the same subset of attributes will be removed from train and test data sets
train_boolean_high_corrs = train_high_corrs.notnull()
test_boolean_high_corrs = test_high_corrs.notnull()
if train_boolean_high_corrs.equals(test_boolean_high_corrs):
collinear_features = pd.Series()
features = train_boolean_high_corrs.columns
for col in features:
collinear_features_tmp = (train_boolean_high_corrs.index[train_boolean_high_corrs[col]]).to_series()
collinear_features = collinear_features.append(collinear_features_tmp)
collinear_features.drop_duplicates(inplace=True)
print('Collinear Predictors:')
print('------------------------')
print(list(collinear_features))
print('\n')
print('Removing the collinear predictors from both train and test data set...\n')
train_float.drop(collinear_features, axis=1, inplace=True)
test_float.drop(collinear_features, axis=1, inplace=True)
# keep the non float variables of the original data sets
# in a different pair of Pandas DataFrames
features_non_float_train = train.columns.difference(attribs_of_interest)
features_non_float_test = test.columns.difference(attribs_of_interest)
train_non_float = train[features_non_float_train].copy()
test_non_float = test[features_non_float_test].copy()
# returns the original data sets without the observed collinear predictors
train = | pd.concat([train_float, train_non_float], axis=1) | pandas.concat |
# info_nce_loss
import torch
import torch.nn.functional as F
import datetime
# percent replicating
import pandas as pd
import matplotlib.pyplot as plt
import random
import numpy as np
import utils_benchmark
# heatmapCorrelation
import seaborn as sns
# Umap
import umap
import umap.plot
import colorcet as cc
def now():
return str(datetime.datetime.now())+': '
def my_collate(batch):
data = [item[0] for item in batch]
data = torch.cat(data, dim=0)
target = [item[1] for item in batch]
target = torch.cat(target, dim=0)
return [data, target]
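# train_val_split stacks the four per-plate path columns (plate1..plate4) into a single
# 'well_path' column, one row per plate per sample, and then splits the stacked frame
# into train/validation parts at the Tsplit fraction (no shuffling).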
def train_val_split(metadata_df, Tsplit=0.8):
df = metadata_df[['labels', 'plate1']]
df = pd.concat([df.rename(columns={'plate1': 'plate2'}), metadata_df[['labels', 'plate2']]])
df = pd.concat([df.rename(columns={'plate2': 'plate3'}), metadata_df[['labels', 'plate3']]])
df = pd.concat([df.rename(columns={'plate3': 'plate4'}), metadata_df[['labels', 'plate4']]])
df = df.rename(columns={'plate4': 'well_path'})
split = int(len(df.index)*Tsplit)
return [df.iloc[:split, :].reset_index(drop=True), df.iloc[split:, :].reset_index(drop=True)]
def filterData(df, filter, encode=None, mode='default'):
if 'negcon' in filter: # drop all negcon wells
if mode == 'default':
df = df[df.control_type != 'negcon']
elif mode == 'eval':
df = df[df.Metadata_control_type != 'negcon']
df = df.reset_index(drop=True)
    if encode is not None:
if mode=='default':
pd.options.mode.chained_assignment = None # default='warn'
obj_df = df[encode].astype('category')
df['labels'] = obj_df.cat.codes
df = df.sort_values(by='labels')
df = df.reset_index(drop=True)
elif mode == 'eval':
pd.options.mode.chained_assignment = None # default='warn'
obj_df = df[encode].astype('category')
df['Metadata_labels'] = obj_df.cat.codes
df = df.sort_values(by='Metadata_labels')
df = df.reset_index(drop=True)
return df
#######################
#%% EVALUATION STUFF ##
#######################
def createUmap(df, nSamples, mode='default'):
plt.figure(figsize=(14, 10), dpi=300)
labels = df.Metadata_labels.iloc[:nSamples]
if mode == 'default':
features = df.iloc[:nSamples, :-1]
elif mode == 'old':
features = df.iloc[:nSamples, 12:]
reducer = umap.UMAP()
embedding = reducer.fit(features)
umap.plot.points(embedding, labels=labels, theme='fire')
plt.gca().set_aspect('equal', 'datalim')
plt.title('UMAP')
plt.show()
return
def compoundCorrelation(df, Ncompounds=20):
plt.figure(figsize=(14, 10), dpi=300)
df = df.transpose()
df = df.iloc[:, :Ncompounds]
Var_Corr = df.corr()
#plot the heatmap
sns.heatmap(Var_Corr, xticklabels=Var_Corr.columns, yticklabels=Var_Corr.columns, annot=True)
plt.title('compound Correlation')
plt.show()
return
def featureCorrelation(df, Nfeatures=20):
plt.figure(figsize=(14, 10), dpi=300)
df = df.iloc[:, :Nfeatures]
Var_Corr = df.corr()
#plot the heatmap
sns.heatmap(Var_Corr, xticklabels=Var_Corr.columns, yticklabels=Var_Corr.columns, annot=True)
plt.title('feature Correlation')
plt.show()
return
def CalculatePercentReplicating(dfs, group_by_feature, n_replicates, n_samples=10000, description='Unknown'):
"""
:param dfs: list of plate dataframes that are analysed together.
:param group_by_feature: feature column which is used to make replicates
:param n_replicates: number of expected replicates present in the given dataframes 'dfs' based on the 'group_by_feature' column
:param n_samples: number of samples used to calculate the null distribution, often 10000
:return: dataframe consisting of the calculated metrics
"""
# Set plotting and sampling parameters
random.seed(9000)
plt.style.use("seaborn-ticks")
plt.rcParams["image.cmap"] = "Set1"
plt.rcParams['axes.prop_cycle'] = plt.cycler(color=plt.cm.Set1.colors)
corr_replicating_df = pd.DataFrame()
# Merge dfs in list
try:
data_df = | pd.concat(dfs) | pandas.concat |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 27 17:50:38 2020
@author: Miguel <NAME>
Description: Script for reading the ISO NE dataset for load profiling in the context
of the NMF Correlated paper. It takes time series of real-time demand, dew point,
and temperature of a particular load zone selected by "location":
0: ME
1: NH
2: VT
3: CT
4: RI
5: SEMASS
6: WCMASS
7: NEMASSBOST
Output: Data_test and Data_train, both data structures containing:
Date, Day of the year, 24 values of hourly Real time, 24 values of hourly Temperature,
24 values of hourly Dew point and the Weekday. The split into train and test of
the whole data set is defined by a date specified by the variables "day", "month" and "year".
"""
import pandas as pd
import datetime
import scipy
import scipy.io
import numpy as np
import pickle
from pathlib import Path
LOCATIONS = ['ME','NH','VT','CT','RI','SEMASS','WCMASS','NEMASSBOST']
project_path = Path("/Users/apple/Desktop/PASAR")
#==================================================================
# SELEECT DATE THAT SPLITS DATA SET INTO TRAIN AND TEST
#==================================================================
#==================================================================
start_day_train_val = 1
start_month_train_val = 1
start_year_train_val= 2011
end_day_train_val = 31
end_month_train_val = 12
end_year_train_val = 2017
start_day_test = 1
start_month_test = 1
start_year_test = 2018
end_day_test = 31
end_month_test = 12
end_year_test = 2018
#==================================================================
data_folder = Path("/Users/apple/Desktop/PASAR/ISO_NE_Dataset_Final/Nestor")
filename = "iso_ne.pickle"
file_to_open = data_folder / filename
pickle_in=open(file_to_open,'rb')
iso_ne=pickle.load(pickle_in)
for location in range(0,8):
location_name = LOCATIONS[location]
data2011=iso_ne[location][0]
data2012=iso_ne[location][1]
data2013=iso_ne[location][2]
data2014=iso_ne[location][3]
data2015=iso_ne[location][4]
data2016=iso_ne[location][5]
data2017=iso_ne[location][6]
data2018=iso_ne[location][7]
Y2011=data2011[['Date','Hour','DEMAND','DryBulb','DewPnt']]
Y2012=data2012[['Date','Hour','DEMAND','DryBulb','DewPnt']]
Y2013=data2013[['Date','Hour','DEMAND','DryBulb','DewPnt']]
Y2014=data2014[['Date','Hour','DEMAND','DryBulb','DewPnt']]
Y2015=data2015[['Date','Hour','DEMAND','DryBulb','DewPnt']]
Y2016=data2016[['Date','Hr_End','RT_Demand','Dry_Bulb','Dew_Point']]
Y2017=data2017[['Date','Hr_End','RT_Demand','Dry_Bulb','Dew_Point']]
Y2018=data2018[['Date','Hr_End','RT_Demand','Dry_Bulb','Dew_Point']]
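    # Each year below is reshaped from an hourly series into one row per day:
    # reshape(-1, 24) packs the 24 hourly values of demand, temperature and dew point
    # into lists, and every 24th timestamp supplies the day's date label.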
Aux2011 = pd.to_datetime(Y2011['Date']).dt.strftime('%d-%b-%Y')
Dates2011 = pd.Series(list(Aux2011[0::24]))
DoWeek2011 = pd.to_datetime(Dates2011).dt.day_name()
Load2011 = pd.Series(list(Y2011['DEMAND'].values.reshape(-1,24)))
Temperature2011 = pd.Series(list(Y2011['DryBulb'].values.reshape(-1,24)))
DewPoint2011 = pd.Series(list(Y2011['DewPnt'].values.reshape(-1,24)))
del Y2011
frame2011 = { 'Date': Dates2011, 'Weekday': DoWeek2011}
frame2011['Load'] = list(Load2011)
frame2011['Temperature'] = list(Temperature2011)
frame2011['DewPoint'] = list(DewPoint2011)
Y2011 = pd.DataFrame(frame2011)
Aux2012 = pd.to_datetime(Y2012['Date']).dt.strftime('%d-%b-%Y')
Dates2012 = pd.Series(list(Aux2012[0::24]))
DoWeek2012 = pd.to_datetime(Dates2012).dt.day_name()
Load2012 = pd.Series(list(Y2012['DEMAND'].values.reshape(-1,24)))
Temperature2012 = pd.Series(list(Y2012['DryBulb'].values.reshape(-1,24)))
DewPoint2012 = pd.Series(list(Y2012['DewPnt'].values.reshape(-1,24)))
del Y2012
frame2012 = { 'Date': Dates2012, 'Weekday': DoWeek2012}
frame2012['Load'] = list(Load2012)
frame2012['Temperature'] = list(Temperature2012)
frame2012['DewPoint'] = list(DewPoint2012)
Y2012 = pd.DataFrame(frame2012)
Aux2013 = pd.to_datetime(Y2013['Date']).dt.strftime('%d-%b-%Y')
Dates2013 = pd.Series(list(Aux2013[0::24]))
DoWeek2013 = pd.to_datetime(Dates2013).dt.day_name()
Load2013 = pd.Series(list(Y2013['DEMAND'].values.reshape(-1,24)))
Temperature2013 = pd.Series(list(Y2013['DryBulb'].values.reshape(-1,24)))
DewPoint2013 = pd.Series(list(Y2013['DewPnt'].values.reshape(-1,24)))
del Y2013
frame2013 = { 'Date': Dates2013, 'Weekday': DoWeek2013}
frame2013['Load'] = list(Load2013)
frame2013['Temperature'] = list(Temperature2013)
frame2013['DewPoint'] = list(DewPoint2013)
Y2013 = pd.DataFrame(frame2013)
Aux2014 = pd.to_datetime(Y2014['Date']).dt.strftime('%d-%b-%Y')
Dates2014 = pd.Series(list(Aux2014[0::24]))
DoWeek2014 = pd.to_datetime(Dates2014).dt.day_name()
Load2014 = pd.Series(list(Y2014['DEMAND'].values.reshape(-1,24)))
Temperature2014 = pd.Series(list(Y2014['DryBulb'].values.reshape(-1,24)))
DewPoint2014 = pd.Series(list(Y2014['DewPnt'].values.reshape(-1,24)))
del Y2014
frame2014 = { 'Date': Dates2014, 'Weekday': DoWeek2014}
frame2014['Load'] = list(Load2014)
frame2014['Temperature'] = list(Temperature2014)
frame2014['DewPoint'] = list(DewPoint2014)
Y2014 = pd.DataFrame(frame2014)
Aux2015 = | pd.to_datetime(Y2015['Date']) | pandas.to_datetime |
from typing import List, Dict
from datetime import timedelta, datetime
import numpy as np
import pandas as pd
import nltk.stem as st
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
import math
import time
from app_folder.logic.ukrainian_stemmer import UkrainianStemmer
from config import MINDUR_HRS
class Scheduling:
def __init__(self):
english_stemmer = st.SnowballStemmer('english')
class StemmedCountVectorizer(CountVectorizer):
def build_analyzer(self):
analyzer = super(StemmedCountVectorizer, self).build_analyzer()
return lambda doc: ([english_stemmer.stem(UkrainianStemmer(w).stem_word()) for w in analyzer(doc)])
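        # Two bag-of-words feature extractors: stemmed_vectorizer stems the task
        # names/descriptions (English + Ukrainian stemmers) before counting terms,
        # while hashtag_vectorizer counts raw hashtag tokens.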
self.stemmed_vectorizer = StemmedCountVectorizer(analyzer='word', max_features=50)
self.hashtag_vectorizer = CountVectorizer(analyzer='word', max_features=15)
def tasksScheduling(self, tasks: List[Dict], previousTasks: List[Dict]) -> List[Dict]:
if len(tasks) == 0:
raise ValueError('Got nothing')
preparedTasksDF = self.tasksPreparation(tasks, previousTasks)
devidedTasksDF = self.devide_tasks(preparedTasksDF)
order = self.blank_algo(devidedTasksDF)
return self.aggreagate(order, devidedTasksDF)
def tasksPreparation(self, tasks: List[Dict], previousTasks: List[Dict]) -> pd.DataFrame:
if len(previousTasks) > 0:
tasksDF = pd.DataFrame(tasks)
previousTasksDF = pd.DataFrame(previousTasks)
namePrev_features = self.stemmed_vectorizer.fit_transform(previousTasksDF['name'])
namePrev_features_arr = namePrev_features.toarray()
name_features = self.stemmed_vectorizer.transform(tasksDF['name'])
name_features_arr = name_features.toarray()
name_features_names = self.stemmed_vectorizer.get_feature_names()
descriptionPrev_features = self.stemmed_vectorizer.fit_transform(previousTasksDF['description'])
descriptionPrev_features_arr = descriptionPrev_features.toarray()
description_features = self.stemmed_vectorizer.transform(tasksDF['description'])
description_features_arr = description_features.toarray()
descriptionPrev_hashtag_features = self.hashtag_vectorizer.fit_transform(previousTasksDF['hashtags'])
descriptionPrev_hashtag_features_arr = descriptionPrev_hashtag_features.toarray()
description_hashtag_features = self.hashtag_vectorizer.transform(tasksDF['hashtags'])
description_hashtag_features_arr = description_hashtag_features.toarray()
previousTasksDF_prepared = pd.concat([previousTasksDF.drop(['description', 'hashtags', 'deadline', 'completed_at',
'result', 'start_time', 'duration_of_completing',
'can_be_performed_after_dd', 'importance', 'name'], axis=1),
pd.DataFrame(namePrev_features_arr, columns=name_features_names),
pd.DataFrame(descriptionPrev_features_arr, columns=self.stemmed_vectorizer.get_feature_names()),
pd.DataFrame(descriptionPrev_hashtag_features_arr, columns=self.hashtag_vectorizer.get_feature_names())
], axis=1)
previousTasksDF_prepared.set_index('id', inplace=True)
tasksDF_prepared = pd.concat([tasksDF.drop(['description', 'hashtags', 'deadline', 'completed_at', 'result',
'start_time', 'duration_of_completing',
'can_be_performed_after_dd', 'importance', 'name'], axis=1),
pd.DataFrame(name_features_arr, columns=name_features_names),
pd.DataFrame(description_features_arr, columns=self.stemmed_vectorizer.get_feature_names()),
pd.DataFrame(description_hashtag_features_arr, columns=self.hashtag_vectorizer.get_feature_names())], axis=1)
tasksDF_prepared.set_index('id', inplace=True)
#increase: -1, 0, 1
y_train = pd.Series(previousTasksDF.apply(lambda x: 1 if not x['result'] or x['deadline'] < x['completed_at'] else -1 \
if (x['deadline'] - x['completed_at'])/x['duration_of_completing'] > 0.5 else 0, axis=1))
nb = MultinomialNB()
nb.fit(previousTasksDF_prepared, y_train)
y_pred = nb.predict(tasksDF_prepared)
tasksDF['y_pred'] = y_pred
tasksDF['start_time'] = tasksDF['start_time'].fillna(time.time())
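            # y_pred encodes the expected deviation from the user's own estimate:
            # 1 -> similar past tasks were late or unfinished (inflate the duration),
            # -1 -> they were finished well before the deadline (shrink the duration),
            # 0 -> roughly on time (keep the estimate).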
tasksDF['estimated_dur'] = tasksDF.apply(lambda x: x['duration_of_completing'] if x['y_pred'] == 0 or
round((x['deadline'] - x['start_time']), 3) == round(x['duration_of_completing'],3) else
x['duration_of_completing']*0.8 if x['y_pred'] == -1 else
x['duration_of_completing']*1.3, axis=1)
tasksDF['estimated_gap'] = tasksDF.apply(lambda x: int(x['duration_of_completing']*0.2) if x['y_pred'] == -1 else
int(x['duration_of_completing']*0.4) if x['y_pred'] == 1 else
int(x['duration_of_completing']*0.3), axis=1)
del namePrev_features_arr, name_features_arr, descriptionPrev_features_arr, description_features_arr, \
descriptionPrev_hashtag_features_arr, description_hashtag_features_arr
return tasksDF
else:
tasksDF = | pd.DataFrame(tasks) | pandas.DataFrame |
"""
"""
import pandas as pd
from transformers import pipeline
from text_to_x.vaderSentiment.vaderSentiment_en import (
SentimentIntensityAnalyzer as Sentiment_en,
)
from text_to_x.vaderSentiment.vaderSentiment_da import (
SentimentIntensityAnalyzer as Sentiment_da,
)
from text_to_x.vaderSentiment.vaderSentiment_se import (
SentimentIntensityAnalyzer as Sentiment_se,
)
from text_to_x.vaderSentiment.vaderSentiment_no import (
SentimentIntensityAnalyzer as Sentiment_no,
)
from text_to_x.TextToTokens import TextToTokens
from text_to_x.TextToX import TextToX
class TextToSentiment(TextToX):
def __init__(
self,
lang=None,
method="mult_bert",
type_token=None,
detect_lang_fun="polyglot",
**kwargs,
):
"""
lang (str): language code, if None language is detected using
detect_lang_fun (which defaults to polyglot).
        detect_lang_fun (str|fun): function to use for language detection.
        default is polyglot, but you can specify a user function, which should
        return the detected language.
        method ("dictionary"|"mult_bert"|"danlp_bert_tone"|fun): method used for sentiment analysis
        type_token (None|'lemma'|'token'): The type of token used. If None, it is
        chosen automatically depending on the method:
        'lemma' for dictionary, otherwise 'token'. Only used if a tokenlist or
a TextToDf is passed to texts_to_sentiment()
"""
super().__init__(lang=lang, kwargs=kwargs, detect_lang_fun=detect_lang_fun)
        if type_token is None:
            self.type_token = "lemma" if method == "dictionary" else "token"
        else:
            self.type_token = type_token
if method == "dictionary":
self.method = self.__get_sent_dict
elif method == "mult_bert":
self.method = self.__get_sent_mult_bert
elif method == "danlp_bert_tone":
self.method = self.__get_sent_danlp_bert_tone
elif callable(method):
self.method = method
else:
raise ValueError(f"The chosen method: {self.method}")
@staticmethod
def __get_sent_danlp_bert_tone(texts, tokenlist):
from danlp.models import load_bert_tone_model
classifier = load_bert_tone_model()
def get_proba(txt):
res = classifier.predict_proba(txt)
polarity, analytic = res
pos, neu, neg = polarity
obj, subj = analytic
return pos, neu, neg, obj, subj
return pd.DataFrame(
[get_proba(txt) for txt in texts],
columns=[
"polarity_pos",
"polarity_neu",
"polarity_neg",
"analytic_obj",
"analytic_subj",
],
)
def texts_to_sentiment(self, texts):
"""
texts (str|list|TextToDf): Should be a string, a list of strings or
other iterable object or an object of class TextToDf
"""
tokenlist = None
assert isinstance(
texts, (TextToTokens, str, list)
), "'texts' must be str, list of strings or TextToTokens object."
if isinstance(texts, TextToTokens):
tokenlist = [df[self.type_token] for df in texts.get_token_dfs()]
if self.lang is None:
self.lang = texts.lang
texts = texts.texts
else:
if isinstance(texts, str):
texts = [texts]
elif isinstance(texts, list) and not isinstance(texts[0], str):
# One may accidentally pass the list of preprocessed data
# frames
raise TypeError(
f"When 'texts' is a list, it must contain \
strings only."
)
self._detect_language(texts)
return self.method(texts=texts, tokenlist=tokenlist)
def __get_sent_dict(self, texts, tokenlist):
if isinstance(self.lang, str):
self.__fetch_sent_lang(self.lang)
if tokenlist is None:
res = [self.__dict_model[self.lang](text) for text in texts]
else:
res = [
self.__dict_model[self.lang](text, tokens)
for text, tokens in zip(texts, tokenlist)
]
return | pd.DataFrame(res) | pandas.DataFrame |
import pandas as pd
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import os
import argparse
from sklearn import preprocessing
from matplotlib.ticker import EngFormatter
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-f1', '--logFolder1', help='Log folder 1', type=str)
parser.add_argument('-f2', '--logFolder2', help='Log folder 2', type=str)
parser.add_argument('-s', '--saveFolder', help='save folder', type=str)
args = parser.parse_args()
path_base = args.logFolder1 # "logs/train_1M_widowx_reach-v3/"
path_base2 = args.logFolder2 # "logs/train_1M_widowx_reach-v3/"
save_dir = args.saveFolder #"experiment_reports/1M_widowx_reach-v3/"
os.makedirs(save_dir, exist_ok=True)
### GET DATA ###
df1 = pd.read_csv(path_base+"a2c/all_rewards_smooth.csv")
df2 = pd.read_csv(path_base+"acktr/all_rewards_smooth.csv")
df3 = pd.read_csv(path_base+"ddpg/all_rewards_smooth.csv")
df4 = pd.read_csv(path_base+"ppo2/all_rewards_smooth.csv")
df5 = pd.read_csv(path_base+"sac/all_rewards_smooth.csv")
df6 = pd.read_csv(path_base+"td3/all_rewards_smooth.csv")
df7 = pd.read_csv(path_base+"trpo/all_rewards_smooth.csv")
df8 = pd.read_csv(path_base+"her_sac/all_rewards_smooth.csv")
df9 = | pd.read_csv(path_base+"her_td3/all_rewards_smooth.csv") | pandas.read_csv |
import sys, os, time, datetime, warnings, configparser
import pandas as pd
import concurrent.futures
from itertools import combinations
from tqdm import tqdm
cur_path = os.path.dirname(os.path.abspath(__file__))
for _ in range(2):
root_path = cur_path[0:cur_path.rfind('/', 0, len(cur_path))]
cur_path = root_path
sys.path.append(root_path + "/" + 'Source/DataBase/')
from DB_API import queryStockList, queryStock, queryCorrelation, storeCorrelation
def get_single_stock_data(root_path, symbol, dates_range):
df, lastUpdateTime = queryStock(root_path, "DB_STOCK", "SHEET_US", "_DAILY", symbol, "daily_update")
if df.empty: return pd.DataFrame()
df.index = pd.to_datetime(df.index)
df = df[df.index.isin(dates_range)].sort_index()
df.loc[:, 'Close_Shift_1'] = df.loc[:, 'adj_close'].shift(1)
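    # Daily simple return: today's adjusted close over the previous day's, minus one.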
df.loc[:, 'Return'] = df.loc[:, 'adj_close']/df.loc[:, 'Close_Shift_1'] - 1
return df
def get_all_stocks_correlation(root_path, dates_range):
df = queryCorrelation(root_path, "DB_STOCK", "SHEET_US_RELA")
if df.empty == False: return df
df = queryStockList(root_path, "DB_STOCK", "SHEET_US_DAILY")
symbols = df.index.values.tolist()
pbar = tqdm(total=len(symbols))
stockData = []
stockList = []
print("get stock data...")
# count = 500
for symbol in symbols:
startTime = time.time()
df = get_single_stock_data(root_path, symbol, dates_range)
if df.empty: continue
stockData.append(df['Return'])
stockList.append(symbol)
outMessage = '%-*s fetched in: %.4s seconds' % (12, symbol, (time.time() - startTime))
pbar.set_description(outMessage)
pbar.update(1)
# count -= 1
# if count == 0: break
pbar.close()
print("merge stock data...")
startTime = time.time()
df_returns = pd.concat(stockData, axis=1).fillna(0)
df_returns.columns = stockList
df_correlations = df_returns.corr()
print('total processing in: %.4s seconds' % ((time.time() - startTime)))
print("cal correlationship...")
startTime = time.time()
pairwise_correlations = []
stockCount = len(stockList)
pbar = tqdm(total=stockCount*stockCount)
for i in range(stockCount):
for j in range(stockCount):
if j > i:
pairwise_correlations.append(df_correlations.iloc[i][j])
pbar.set_description(str(i) + " " + str(j))
pbar.update(1)
print("arrange matrix...")
us_company_pairs = combinations(stockList, 2)
df_us_company_pairs = pd.DataFrame(list(us_company_pairs))
df_us_company_pairs.columns = ['company1', 'company2']
df_us_company_pairs.loc[:, 'correlation'] = pd.Series(pairwise_correlations).T
df_us_company_pairs = df_us_company_pairs.sort_values(['correlation'], ascending=[False]).reset_index(drop=True)
storeCorrelation(root_path, "DB_STOCK", "SHEET_US_RELA", df_us_company_pairs)
print('total processing in: %.4s seconds' % ((time.time() - startTime)))
pbar.close()
return df_us_company_pairs
if __name__ == "__main__":
#np.seterr(divide='ignore', invalid='ignore')
#np.set_printoptions(precision=3, suppress=True)
pd.set_option('precision', 3)
pd.set_option('display.width',1000)
warnings.filterwarnings('ignore', category=pd.io.pytables.PerformanceWarning)
now_date = datetime.datetime.now()
start_date = (now_date - datetime.timedelta(days=90)).strftime("%Y-%m-%d")
end_date = now_date.strftime("%Y-%m-%d")
config = configparser.ConfigParser()
config.read(root_path + "/" + "config.ini")
storeType = int(config.get('Setting', 'StoreType'))
# if storeType == 1:
# from Start_DB_Server import StartServer, ShutdownServer
# # start database server (async)
# thread = StartServer(root_path)
# # wait for db start, the standard procedure should listen to
# # the completed event of function "StartServer"
# time.sleep(5)
df = get_all_stocks_correlation(root_path, | pd.date_range(start_date, end_date) | pandas.date_range |
#!/usr/bin/env python3
'''
File: read kpi.csv, place it in a SQL database, create a PNG of historical KPI and present a graph on the dashboard
Usage: kpi_csv_sq.py --store --png --show --path <path to directories to traverse> --database <name of database>
Example: kpi_csv_sq.py --show (show dashboard generated from database)
Example: kpi_csv_sq.py --store --png --show --path <path to read kpi.csv> (read kpi.csv store to database, write png, show dashboard )
'''
# visit http://127.0.0.1:8050/ in your web browser.
import os
import dash
import dash_core_components as dcc
import dash_html_components as html
import plotly.express as px
import pandas as pd
import sqlite3
import argparse
from pathlib import Path
import time
# Any style components can be used
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
class csv_sqlite_dash():
def __init__(self,
_path = '.',
_file = 'kpi.csv',
_database = 'qa_db',
_table = 'qa_table',
_png = False):
self.path = _path
self.file = _file
self.database = _database
self.table = _table
self.png = _png
self.png_generated = False
self.kpi_list = []
self.html_list = []
self.conn = None
self.df = | pd.DataFrame() | pandas.DataFrame |
import os
import re
import matplotlib.pyplot as plt
import pandas as pd
from mawiparse.utils import read_all_pkl
def plot_monthly(date):
all_list = [x[0] for x in os.walk('data')]
dir_list = []
for x in all_list:
if str(date) in str(x):
# print(f"{str(x)} has 202003")
dir_list.append(x)
else:
# print(f"{str(x)} doesn't have 202003")
pass
dir_list = sorted(dir_list)
print(dir_list)
# read all directory from this current directory
# calculate average, sum
sum_vol = 0
sum_list = []
average_list = []
label_list = []
type_pd = pd.DataFrame(index={'ICMP', "TCP", "UDP", "Others"})
for curr_dir in dir_list:
vol_data, type_data, tcp_d, udp_d, _ = read_all_pkl(curr_dir + '/')
'''
#1: sum the total volume
'''
sum_list.append(sum(vol_data.values()))
label_list.append(curr_dir)
average_list.append(sum(vol_data.values()) / len(vol_data))
'''
#2: type classification
'''
curr_date = curr_dir.split('/')[1]
# df_new = dat.rename( index=lambda s: str(s)[:-2].split('.')[-1] )
dat = | pd.DataFrame.from_dict(type_data, orient='index') | pandas.DataFrame.from_dict |
from django.forms.models import model_to_dict
from rules.contrib.views import permission_required, objectgetter
import math, json, logging
from datetime import datetime, timedelta
from django.utils import timezone
import numpy as np
import pandas as pd
from django.conf import settings
from django.contrib import auth
from django.db import connection as conn
from django.http import HttpResponse, HttpResponseForbidden
from django.shortcuts import redirect, render
from pinax.eventlog.models import log as eventlog
from dashboard.event_logs_types.event_logs_types import EventLogTypes
from dashboard.common.db_util import canvas_id_to_incremented_id
from dashboard.common import utils
from django.core.exceptions import ObjectDoesNotExist
from collections import namedtuple
from dashboard.models import Course, CourseViewOption, Resource, UserDefaultSelection
from dashboard.settings import RESOURCE_VALUES, COURSES_ENABLED
logger = logging.getLogger(__name__)
# strings for construct resource download url
RESOURCE_URLS = settings.RESOURCE_URLS
CANVAS_FILE_ID_NAME_SEPARATOR = "|"
# string for no grade
GRADE_A="90-100"
GRADE_B="80-89"
GRADE_C="70-79"
GRADE_LOW="low_grade"
NO_GRADE_STRING = "NO_GRADE"
# string for resource type
RESOURCE_TYPE_STRING = "resource_type"
RESOURCE_VALUES = settings.RESOURCE_VALUES
# Is courses_enabled api enabled/disabled?
COURSES_ENABLED = settings.COURSES_ENABLED
# how many decimal digits to keep
DECIMAL_ROUND_DIGIT = 1
def gpa_map(grade):
if grade is None:
return NO_GRADE_STRING
# convert to float
grade_float = float(grade)
if grade_float >= 90:
return GRADE_A
elif grade_float >=80:
return GRADE_B
elif grade_float >=70:
return GRADE_C
else:
return GRADE_LOW
def get_home_template(request):
return render(request, 'frontend/index.html')
@permission_required('dashboard.get_course_template',
fn=objectgetter(Course, 'course_id', 'canvas_id'), raise_exception=True)
def get_course_template(request, course_id=0):
return render(request, 'frontend/index.html', {'course_id': course_id})
@permission_required('dashboard.get_course_info',
fn=objectgetter(Course, 'course_id', 'canvas_id'), raise_exception=True)
def get_course_info(request, course_id=0):
"""Returns JSON data about a course
:param request: HTTP Request
:type request: Request
:param course_id: Unizin Course ID, defaults to 0
:param course_id: int, optional
:return: JSON to be used
:rtype: str
"""
course_id = canvas_id_to_incremented_id(course_id)
today = timezone.now()
try:
course = Course.objects.get(id=course_id)
except ObjectDoesNotExist:
return HttpResponse("{}")
course_resource_list = []
try:
resource_list = Resource.objects.get_course_resource_type(course_id)
if resource_list is not None:
logger.info(f"Course {course_id} resources data type are: {resource_list}")
resource_defaults = settings.RESOURCE_VALUES
for item in resource_list:
result = utils.look_up_key_for_value(resource_defaults, item)
if result is not None:
course_resource_list.append(result.capitalize())
logger.info(f"Mapped generic resource types in a course {course_id}: {course_resource_list}")
except(ObjectDoesNotExist,Exception) as e:
logger.info(f"getting the course {course_id} resources types has errors due to:{e}")
course_resource_list.sort()
resp = model_to_dict(course)
course_start, course_end = course.get_course_date_range()
current_week_number = math.ceil((today - course_start).days/7)
total_weeks = math.ceil((course_end - course_start).days/7)
if course.term is not None:
resp['term'] = model_to_dict(course.term)
else:
resp['term'] = None
# Have a fixed maximum number of weeks
if total_weeks > settings.MAX_DEFAULT_WEEKS:
logger.debug(f'{total_weeks} is greater than {settings.MAX_DEFAULT_WEEKS} setting total weeks to default.')
total_weeks = settings.MAX_DEFAULT_WEEKS
resp['current_week_number'] = current_week_number
resp['total_weeks'] = total_weeks
resp['course_view_options'] = CourseViewOption.objects.get(course=course).json(include_id=False)
resp['resource_types'] = course_resource_list
return HttpResponse(json.dumps(resp, default=str))
# show percentage of users who read the resource within prior n weeks
@permission_required('dashboard.resource_access_within_week',
fn=objectgetter(Course, 'course_id','canvas_id'), raise_exception=True)
def resource_access_within_week(request, course_id=0):
course_id = canvas_id_to_incremented_id(course_id)
current_user = request.user.get_username()
logger.debug("current_user=" + current_user)
# environment settings:
df_default_display_settings()
    # read query parameters from the request
week_num_start = int(request.GET.get('week_num_start','1'))
week_num_end = int(request.GET.get('week_num_end','0'))
grade = request.GET.get('grade','all')
filter_values = request.GET.get(RESOURCE_TYPE_STRING, ['files', 'videos'])
filter_values = filter_values.split(",")
filter_list = []
for filter_value in filter_values:
if filter_value != '':
filter_list.extend(RESOURCE_VALUES[filter_value.lower()])
# json for eventlog
data = {
"week_num_start": week_num_start,
"week_num_end": week_num_end,
"grade": grade,
"course_id": course_id,
"resource_type": filter_values
}
eventlog(request.user, EventLogTypes.EVENT_VIEW_RESOURCE_ACCESS.value, extra=data)
# get total number of student within the course_id
total_number_student_sql = "select count(*) from user where course_id = %(course_id)s and enrollment_type='StudentEnrollment'"
if (grade == GRADE_A):
total_number_student_sql += " and current_grade >= 90"
elif (grade == GRADE_B):
total_number_student_sql += " and current_grade >= 80 and current_grade < 90"
elif (grade == GRADE_C):
total_number_student_sql += " and current_grade >= 70 and current_grade < 80"
total_number_student_df = pd.read_sql(total_number_student_sql, conn, params={"course_id": course_id})
total_number_student = total_number_student_df.iloc[0,0]
logger.info(f"course_id {course_id} total student={total_number_student}")
if total_number_student == 0:
logger.info(f"There are no students in the percent grade range {grade} for course {course_id}")
return HttpResponse("{}")
course_date_start = get_course_date_start(course_id)
start = course_date_start + timedelta(days=(week_num_start * 7))
end = course_date_start + timedelta(days=(week_num_end * 7))
logger.debug("course_start=" + str(course_date_start) + " start=" + str(start) + " end=" + str(end))
# get time range based on week number passed in via request
sqlString = f"""SELECT a.resource_id as resource_id, r.resource_type as resource_type, r.name as resource_name, u.current_grade as current_grade, a.user_id as user_id
FROM resource r, resource_access a, user u, course c, academic_terms t
WHERE a.resource_id = r.id and a.user_id = u.user_id
and r.course_id = c.id and c.term_id = t.id
and a.access_time > %(start_time)s
and a.access_time < %(end_time)s
and r.course_id = %(course_id)s
and u.course_id = %(course_id)s
and u.enrollment_type = 'StudentEnrollment' """
startTimeString = start.strftime('%Y%m%d') + "000000"
endTimeString = end.strftime('%Y%m%d') + "000000"
logger.debug(sqlString)
logger.debug("start time=" + startTimeString + " end_time=" + endTimeString)
df = pd.read_sql(sqlString, conn, params={"start_time": startTimeString,"end_time": endTimeString, "course_id": course_id})
logger.debug(df)
# return if there is no data during this interval
if (df.empty):
return HttpResponse("{}")
# group by resource_id, and resource_name
# reformat for output
df['resource_id_name'] = df['resource_id'].astype(str).str.cat(df['resource_name'], sep=';')
df=df.drop(['resource_id', 'resource_name'], axis=1)
df.set_index(['resource_id_name'])
# drop resource records when the resource has been accessed multiple times by one user
df.drop_duplicates(inplace=True)
# map point grade to letter grade
df['grade'] = df['current_grade'].map(gpa_map)
# calculate the percentage
df['percent'] = round(df.groupby(['resource_id_name', 'grade'])['resource_id_name'].transform('count')/total_number_student, 2)
df=df.drop(['current_grade', 'user_id'], axis=1)
# now only keep the resource access stats by grade level
df.drop_duplicates(inplace=True)
resource_id_name=df["resource_id_name"].unique()
#df.reset_index(inplace=True)
# zero filled dataframe with resource name as row name, and grade as column name
output_df=pd.DataFrame(0.0, index=resource_id_name, columns=[GRADE_A, GRADE_B, GRADE_C, GRADE_LOW, NO_GRADE_STRING, RESOURCE_TYPE_STRING])
output_df=output_df.rename_axis('resource_id_name')
output_df=output_df.astype({RESOURCE_TYPE_STRING: str})
for index, row in df.iterrows():
# set value
output_df.at[row['resource_id_name'], row['grade']] = row['percent']
output_df.at[row['resource_id_name'], RESOURCE_TYPE_STRING] = row[RESOURCE_TYPE_STRING]
output_df.reset_index(inplace=True)
# now insert person's own viewing records: what resources the user has viewed, and the last access timestamp
selfSqlString = "select CONCAT(r.id, ';', r.name) as resource_id_name, count(*) as self_access_count, max(a.access_time) as self_access_last_time " \
"from resource_access a, user u, resource r " \
"where a.user_id = u.user_id " \
"and a.resource_id = r.ID " \
"and u.sis_name=%(current_user)s " \
"group by CONCAT(r.id, ';', r.name)"
logger.debug(selfSqlString)
logger.debug("current_user=" + current_user)
selfDf= pd.read_sql(selfSqlString, conn, params={"current_user":current_user})
output_df = output_df.join(selfDf.set_index('resource_id_name'), on='resource_id_name', how='left')
output_df["total_count"] = output_df.apply(lambda row: row["90-100"]+row["80-89"]+row["70-79"] + row["low_grade"]+row.NO_GRADE, axis=1)
if (grade != "all"):
# drop all other grades
grades = [GRADE_A, GRADE_B, GRADE_C, GRADE_LOW, NO_GRADE_STRING]
for i_grade in grades:
if (i_grade==grade):
output_df["total_count"] = output_df[i_grade]
else:
output_df=output_df.drop([i_grade], axis=1)
output_df=output_df[output_df.resource_type.isin(filter_list)]
# if no checkboxes are checked send nothing
if (output_df.empty):
return HttpResponse("{}")
# only keep rows where total_count > 0
output_df = output_df[output_df.total_count > 0]
# time 100 to show the percentage
output_df["total_count"] = output_df["total_count"] * 100
# round all numbers to one decimal point
output_df = output_df.round(DECIMAL_ROUND_DIGIT)
output_df.fillna(0, inplace=True) #replace null value with 0
output_df['resource_id_part'], output_df['resource_name_part'] = output_df['resource_id_name'].str.split(';', 1).str
output_df['resource_name'] = output_df.apply(lambda row: RESOURCE_URLS[row.resource_type]["prefix"] + row.resource_id_part + RESOURCE_URLS[row.resource_type]["postfix"] + CANVAS_FILE_ID_NAME_SEPARATOR + row.resource_name_part, axis=1)
output_df.drop(columns=['resource_id_part', 'resource_name_part', 'resource_id_name'], inplace=True)
logger.debug(output_df.to_json(orient='records'))
return HttpResponse(output_df.to_json(orient='records'),content_type='application/json')
@permission_required('dashboard.grade_distribution',
fn=objectgetter(Course, 'course_id','canvas_id'), raise_exception=True)
def grade_distribution(request, course_id=0):
logger.info(grade_distribution.__name__)
course_id = canvas_id_to_incremented_id(course_id)
current_user = request.user.get_username()
grade_score_sql = f"""select current_grade,
(select show_grade_counts From course where id=%(course_id)s) as show_number_on_bars,
(select current_grade from user where sis_name=%(current_user)s and course_id=%(course_id)s) as current_user_grade
from user where course_id=%(course_id)s and enrollment_type='StudentEnrollment';
"""
df = pd.read_sql(grade_score_sql, conn, params={"current_user": current_user, 'course_id': course_id})
if df.empty or df['current_grade'].isnull().all():
return HttpResponse(json.dumps({}), content_type='application/json')
df['tot_students'] = df.shape[0]
df = df[df['current_grade'].notnull()]
df['current_grade'] = df['current_grade'].astype(float)
df['grade_avg'] = df['current_grade'].mean().round(2)
df['median_grade'] = df['current_grade'].median().round(2)
df['show_number_on_bars'] = df['show_number_on_bars'].apply(lambda x: True if x == 1 else False)
df.sort_values(by=['current_grade'], inplace=True)
df.reset_index(drop=True, inplace=True)
grades = df['current_grade'].values.tolist()
logger.debug(f"Grades distribution: {grades}")
BinningGrade = find_binning_grade_value(grades)
if BinningGrade is not None and not BinningGrade.binning_all:
df['current_grade'] = df['current_grade'].replace(df['current_grade'].head(BinningGrade.index),
BinningGrade.value)
df['show_dash_line'] = show_dashed_line(df['current_grade'].iloc[0], BinningGrade)
if df[df['current_grade'] > 100.0].shape[0] > 0:
df['graph_upper_limit'] = int((5 * round(float(df['current_grade'].max()) / 5) + 5))
else:
df['current_grade'] = df['current_grade'].apply(lambda x: 99.99 if x == 100.00 else x)
df['graph_upper_limit'] = 100
# json for eventlog
data = {
"course_id": course_id,
"show_number_on_bars": df['show_number_on_bars'].values[0]
}
eventlog(request.user, EventLogTypes.EVENT_VIEW_GRADE_DISTRIBUTION.value, extra=data)
return HttpResponse(df.to_json(orient='records'))
@permission_required('dashboard.update_user_default_selection_for_views',
fn=objectgetter(Course, 'course_id','canvas_id'), raise_exception=True)
def update_user_default_selection_for_views(request, course_id=0):
logger.info(update_user_default_selection_for_views.__name__)
course_id = canvas_id_to_incremented_id(course_id)
current_user = request.user.get_username()
default_selection = json.loads(request.body.decode("utf-8"))
logger.info(default_selection)
default_type = list(default_selection.keys())[0]
default_type_value = default_selection.get(default_type)
logger.info(f"request to set default for type: {default_type} and default_type value: {default_type_value}")
# json for eventlog
data = {
"course_id": course_id,
"default_type": default_type,
"default_value": default_type_value
}
eventlog(request.user, EventLogTypes.EVENT_VIEW_SET_DEFAULT.value, extra=data)
key = 'default'
try:
obj, create_or_update_bool = UserDefaultSelection.objects.set_user_defaults(int(course_id), current_user,
default_type,
default_type_value)
logger.info(
f"""setting default returns with success with response {obj.__dict__} and entry created or Updated: {create_or_update_bool}
for user {current_user} in course {course_id} """)
value = 'success'
except (ObjectDoesNotExist, Exception) as e:
logger.info(f"updating default failed due to {e} for user {current_user} in course: {course_id} ")
value = 'fail'
return HttpResponse(json.dumps({key: value}),content_type='application/json')
@permission_required('dashboard.get_user_default_selection',
fn=objectgetter(Course, 'course_id','canvas_id'), raise_exception=True)
def get_user_default_selection(request, course_id=0):
logger.info(get_user_default_selection.__name__)
course_id = canvas_id_to_incremented_id(course_id)
user_sis_name = request.user.get_username()
default_view_type = request.GET.get('default_type')
key = 'default'
no_user_default_response = json.dumps({key: ''})
logger.info(f"the default option request from user {user_sis_name} in course {course_id} of type: {default_view_type}")
default_value = UserDefaultSelection.objects.get_user_defaults(int(course_id), user_sis_name, default_view_type)
logger.info(f"""default option check returned from DB for user: {user_sis_name} course {course_id} and type:
{default_view_type} is {default_value}""")
if not default_value:
logger.info(
f"user {user_sis_name} in course {course_id} don't have any defaults values set type {default_view_type}")
return HttpResponse(no_user_default_response, content_type='application/json')
result = json.dumps({key: default_value})
logger.info(f"user {user_sis_name} in course {course_id} for type {default_view_type} defaults: {result}")
return HttpResponse(result, content_type='application/json')
@permission_required('dashboard.assignments',
fn=objectgetter(Course, 'course_id','canvas_id'), raise_exception=True)
def assignments(request, course_id=0):
logger.info(assignments.__name__)
course_id = canvas_id_to_incremented_id(course_id)
current_user = request.user.get_username()
df_default_display_settings()
percent_selection = float(request.GET.get('percent', '0.0'))
# json for eventlog
data = {
"course_id": course_id,
"percent_selection": percent_selection
}
eventlog(request.user, EventLogTypes.EVENT_VIEW_ASSIGNMENT_PLANNING.value, extra=data)
logger.info('selection from assignment Planning {}'.format(percent_selection))
assignments_in_course = get_course_assignments(course_id)
if assignments_in_course.empty:
return HttpResponse(json.dumps([]), content_type='application/json')
assignment_submissions = get_user_assignment_submission(current_user, assignments_in_course, course_id)
df = pd.merge(assignments_in_course, assignment_submissions, on='assignment_id', how='left')
if df.empty:
logger.info('There are no assignment data in the course %s for user %s ' % (course_id, current_user))
return HttpResponse(json.dumps([]), content_type='application/json')
df.sort_values(by='due_date', inplace=True)
df.drop(columns=['assignment_id', 'due_date','grp_id'], inplace=True)
df.drop_duplicates(keep='first', inplace=True)
    # instructors might never see the avg score since they don't have grades in assignments; we don't have a role described in the flow to open the gates for them
if not request.user.is_superuser:
df['avg_score']= df.apply(no_show_avg_score_for_ungraded_assignments, axis=1)
df['avg_score']=df['avg_score'].fillna('Not available')
df3 = df[df['towards_final_grade'] > 0.0]
df3[['score']] = df3[['score']].astype(float)
df3['graded'] = df3['graded'].fillna(False)
df3['percent_gotten'] = df3.apply(lambda x: user_percent(x), axis=1)
df3.sort_values(by=['graded', 'due_date_mod'], ascending=[False, True], inplace=True)
df3.reset_index(inplace=True)
df3.drop(columns=['index'], inplace=True)
assignment_data = {}
assignment_data['progress'] = json.loads(df3.to_json(orient='records'))
# Group the data according the assignment prep view
df2 = df[df['towards_final_grade'] >= percent_selection]
df2.reset_index(inplace=True)
df2.drop(columns=['index'], inplace=True)
logger.debug('The Dataframe for the assignment planning %s ' % df2)
grouped = df2.groupby(['week', 'due_dates'])
assignment_list = []
for name, group in grouped:
# name is a tuple of (week,due_date) => (1,'06/23/2018')
# group is a dataframe based on grouping by week,due_date
dic = {}
group.drop(['week', 'due_dates'], axis=1, inplace=True)
dic['week'] = name[0]
dic['due_date'] = name[1]
dic['assign'] = json.loads(group.to_json(orient='records'))
assignment_list.append(dic)
week_list = set()
for item in assignment_list:
week_list.add(item['week'])
weeks = sorted(week_list)
full = []
for i, week in enumerate(weeks):
data = {}
data["week"] = np.uint64(week).item()
data["id"] = i + 1
dd_items = data["due_date_items"] = []
for item in assignment_list:
assignment_due_date_grp = {}
if item['week'] == week:
assignment_due_date_grp['due_date'] = item['due_date']
assignment_due_date_grp['assignment_items'] = item['assign']
dd_items.append(assignment_due_date_grp)
full.append(data)
assignment_data['plan'] = json.loads(json.dumps(full))
return HttpResponse(json.dumps(assignment_data), content_type='application/json')
def get_course_assignments(course_id):
sql=f"""select assign.*,sub.avg_score from
(select ifnull(assignment_id, 0) as assignment_id ,name,assign_grp_name,grp_id,due_date,points_possible,group_points,weight,drop_lowest,drop_highest from
(select a.id as assignment_id,a.assignment_group_id, a.local_date as due_date,a.name,a.points_possible from assignment as a where a.course_id =%(course_id)s) as app right join
(select id, name as assign_grp_name, id as grp_id, group_points, weight,drop_lowest,drop_highest from assignment_groups where course_id=%(course_id)s) as ag on ag.id=app.assignment_group_id) as assign left join
(select distinct assignment_id,avg_score from submission where course_id=%(course_id)s) as sub on sub.assignment_id = assign.assignment_id
"""
assignments_in_course = pd.read_sql(sql,conn,params={'course_id': course_id}, parse_dates={'due_date': '%Y-%m-%d'})
# No assignments found in the course
if assignments_in_course.empty:
        logger.info('The course %s doesn\'t seem to have assignment data' % course_id)
return assignments_in_course
assignments_in_course['due_date'] = pd.to_datetime(assignments_in_course['due_date'],unit='ms')
assignments_in_course[['points_possible','group_points']]=assignments_in_course[['points_possible','group_points']].fillna(0)
assignments_in_course[['points_possible', 'group_points','weight']] = assignments_in_course[['points_possible', 'group_points','weight']].astype(float)
consider_weight=is_weight_considered(course_id)
df2 = assignments_in_course[['weight','group_points','grp_id']].drop_duplicates()
hidden_assignments = are_weighted_assignments_hidden(course_id, df2)
total_points=assignments_in_course['points_possible'].sum()
# if assignment group is weighted and no assignments added yet then assignment name will be nothing so situation is specific to that
if hidden_assignments:
assignments_in_course['name'] = assignments_in_course['name'].fillna(assignments_in_course['assign_grp_name']+' Group Unavailable Assignments')
assignments_in_course['towards_final_grade']=assignments_in_course.apply(lambda x: percent_calculation(consider_weight, total_points,hidden_assignments, x), axis=1)
assignments_in_course['calender_week']=assignments_in_course['due_date'].dt.week
assignments_in_course['calender_week']=assignments_in_course['calender_week'].fillna(0).astype(int)
min_week=find_min_week(course_id)
max_week=assignments_in_course['calender_week'].max()
week_list = [x for x in range(min_week,max_week+1)]
assignments_in_course['week']=assignments_in_course['calender_week'].apply(lambda x: 0 if x == 0 else week_list.index(x)+1)
assignments_in_course.sort_values(by='due_date', inplace = True)
assignments_in_course['current_week']=assignments_in_course['calender_week'].apply(lambda x: find_current_week(x))
assignments_in_course['due_date_mod'] =assignments_in_course['due_date'].astype(str).apply(lambda x:x.split()[0])
assignments_in_course['due_dates']= pd.to_datetime(assignments_in_course['due_date_mod']).dt.strftime('%m/%d')
assignments_in_course['due_dates'].replace('NaT','N/A',inplace=True)
return assignments_in_course
def get_user_assignment_submission(current_user,assignments_in_course_df, course_id):
sql = "select assignment_id, score, graded_date from submission where " \
"user_id=(select user_id from user where sis_name = %(current_user)s and course_id = %(course_id)s ) and course_id = %(course_id)s"
assignment_submissions = pd.read_sql(sql, conn, params={'course_id': course_id, "current_user": current_user})
if assignment_submissions.empty:
        logger.info('The user %s does not seem to be a student in the course.' % current_user)
# manually adding the columns for display in UI
assignment_submissions = pd.DataFrame()
assignment_submissions['assignment_id'] = assignments_in_course_df['assignment_id']
assignment_submissions['score'] = None
assignment_submissions['graded'] = False
else:
assignment_submissions['graded'] = assignment_submissions['graded_date'].notnull()
assignment_submissions.drop(columns=['graded_date'], inplace=True)
return assignment_submissions
# don't show the avg scores for student when individual assignment is not graded as canvas currently don't show it
def no_show_avg_score_for_ungraded_assignments(row):
if row['score'] is None:
return 'Not available'
else: return row['avg_score']
def user_percent(row):
if row['graded']:
s = round((row['score'] / row['points_possible']) * row['towards_final_grade'], 2)
return s
else:
return row['towards_final_grade']
def percent_calculation(consider_weight,total_points,hidden_assignments,row):
"""
    This function determines how much an assignment is worth (as a %) of a course. The cases
    include: 1. assignment groups have weights and no hidden assignments in them
    2. the vanilla case: default group, no weights, regardless of whether assignments are hidden
    3. assignment groups have weights and hidden or no assignments in them
:param consider_weight:
:param total_points:
:param hidden_assignments:
:param row:
:return:
"""
if hidden_assignments and consider_weight and row['group_points'] == 0:
return round(row['weight'],2)
if hidden_assignments and consider_weight and row['group_points'] != 0:
return round((row['points_possible']/row['group_points'])*row['weight'],2)
if consider_weight and row['group_points']!=0:
return round((row['points_possible']/row['group_points'])*row['weight'],2)
if not consider_weight:
return round((row['points_possible']/total_points)*100,2)
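# Illustrative sketch (not part of the original source), walking through the cases above:
# - weighted group, assignments visible: points_possible=20, group_points=100, weight=30
#   -> round((20/100)*30, 2) == 6.0 (% of the final grade)
# - weighted group with hidden assignments and group_points == 0 -> the full group weight is returned
# - unweighted course with total_points=400 and points_possible=20 -> round((20/400)*100, 2) == 5.0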
def find_min_week(course_id):
date = get_course_date_start(course_id)
year,week,dow=date.isocalendar()
return week
def find_current_week(row):
# this needs to be local timezone
current_date = timezone.localtime(timezone.now())
year,week,dow = current_date.isocalendar() #dow = day of week
if row == week:
return True
else: return False
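# Illustrative sketch (not part of the original source): isocalendar() returns (ISO year, ISO week, ISO weekday),
# e.g. datetime.date(2020, 9, 1).isocalendar() gives (2020, 36, 2), so an assignment whose calender_week is 36
# would be marked as falling in the current week during that week.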
def is_weight_considered(course_id):
    sql = "select consider_weight from assignment_weight_consideration where course_id=%(course_id)s"
    df = pd.read_sql(sql, conn, params={"course_id": course_id})
value = df['consider_weight'].iloc[0]
return value
def get_course_date_start(course_id):
logger.info(get_course_date_start.__name__)
course_date_start = Course.objects.get(id=course_id).get_course_date_range().start
return course_date_start
def are_weighted_assignments_hidden(course_id, df):
"""
    If assignments are weighted, the assignment group weights total 100%. The code checks whether each assignment
    group has group points corresponding to its group weight; if not, some assignments in the group are hidden.
:param course_id:
:return:
"""
logger.info(are_weighted_assignments_hidden.__name__)
df['weight'] = df['weight'].astype(int)
tot_weight = df['weight'].sum()
if tot_weight > 0:
df['hidden'] = 0
df = df[df['weight'] > 0]
df = df.reset_index(drop=True)
df.loc[0, 'hidden'] = df.loc[0, 'weight']
for i in range(1, len(df)):
if df.loc[i, 'group_points']:
df.loc[i, 'hidden'] = df.loc[i - 1, 'hidden'] + df.loc[i, 'weight']
if df['hidden'].max() == 100:
logger.info(f"weighted assignments in course {course_id} are not hidden")
return False
else:
logger.info(f"few weighted assignments in course {course_id} are hidden")
return True
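# Illustrative sketch (not part of the original source): for three weighted groups with weight=[30, 30, 40],
# the cumulative 'hidden' column only reaches 100 when every group also reports group points, e.g.
# group_points=[100, 100, 200] -> hidden=[30, 60, 100] -> returns False (nothing hidden), while
# group_points=[100, 0, 200] -> hidden=[30, 0, 40] -> returns True (some assignments are hidden).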
def find_binning_grade_value(grades):
fifth_item = grades[4]
next_to_fifth_item = grades[5]
if next_to_fifth_item - fifth_item > 2:
BinningGrade = get_binning_grade()
return BinningGrade(value=fifth_item, index=4, binning_all=False)
else:
return binning_logic(grades, fifth_item)
def is_odd(num):
if num % 2 == 0:
return False
else:
return True
def show_dashed_line(grade, BinningGrade):
"""
    Logic to determine whether or not to show the dashed line.
:param grade:
:param BinningGrade:
:return:
"""
if BinningGrade.binning_all or grade > 96 or grade < 2:
return False
else:
return True
def check_if_grade_qualifies_for_binning(grade, fifthElement):
# case: 96.7, 94.76,
if int(grade) - int(fifthElement) > 1:
return False
# case: 94.86, 94.76
if int(grade) - int(fifthElement) == 0:
return True
# case 95.89, 94.76
    if is_odd(int(grade)):
        return True
    return False
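# Illustrative sketch (not part of the original source), with fifthElement = 94.76:
#   check_if_grade_qualifies_for_binning(96.7, 94.76)  -> False (integer gap of 2 is greater than 1)
#   check_if_grade_qualifies_for_binning(94.86, 94.76) -> True  (same integer part)
#   check_if_grade_qualifies_for_binning(95.89, 94.76) -> True  (gap of 1 and 95 is odd)
#   check_if_grade_qualifies_for_binning(96.40, 95.89) -> False (gap of 1 but 96 is even)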
def binning_logic(grades, fifth_item_in_list):
"""
    Histogram binning uses a bin width of 2 ([0,2), [2,4), [4,6), ...); for each bin the starting value is
    inclusive and the ending value is exclusive.
Case 1: Just last 5 are binned
Actual distribution: [69.79, 80.0, 80.5, 88.21, 88.79, 92.71, 92.71, 92.71, 93.14, 94.43]
Binning Distribution: [88.79, 88.79, 88.79, 88.79, 88.79, 92.71, 92.71, 92.71, 93.14, 94.43]
Case 2: More than last 5 are binned based on histogram binning by count of 2
Actual Distribution: [90.77, 93.09, 93.42, 94.85, 94.87, 94.88, 94.9, 95.55, 95.89, 96.28, 96.4, 96.47, 96.49, 96.68]
Binning Distribution: [95.89, 95.89, 95.89, 95.89, 95.89, 95.89, 95.89, 95.89, 95.89,96.28, 96.4, 96.47, 96.49, 96.68]
    :param grades: grades sorted in ascending order
:param fifth_item_in_list:
:return: max grade in the binned list, length of binned grades, bool value indicating whether all grades are being binned
"""
binning_list = grades[:5]
BinningGrade = get_binning_grade()
for grade in grades[5:]:
if check_if_grade_qualifies_for_binning(grade, fifth_item_in_list):
binning_list.append(grade)
else:
return BinningGrade(max(binning_list), len(binning_list),False)
return BinningGrade(max(binning_list), len(binning_list), True)
def get_binning_grade():
return namedtuple('BinningGrade', ['value', 'index','binning_all'])
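# Illustrative sketch (not part of the original source):
#   BinningGrade = get_binning_grade()
#   BinningGrade(value=88.79, index=4, binning_all=False)
# is what find_binning_grade_value returns for Case 1 in the binning_logic docstring, since the sixth-lowest
# grade (92.71) sits more than 2 points above the fifth-lowest (88.79).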
def df_default_display_settings():
    pd.set_option('display.max_columns', None)
    pd.set_option('display.max_rows', None)
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
import collections as cl
import pandas as pd
from .crop import Crop
from .scenario import Scenario
import json
from .util import *
class District():
def __init__(self, df, name, key, scenario_file = 'baseline'):
self.T = len(df)
self.starting_year = df.index.year[0]
self.number_years = df.index.year[-1]-df.index.year[0]
self.key = key
self.name = name
self.leap = leap(np.arange(min(df.index.year), max(df.index.year) + 2))
year_list = np.arange(min(df.index.year), max(df.index.year) + 2)
self.days_in_month = days_in_month(year_list, self.leap)
self.dowy_eom = dowy_eom(year_list, self.leap)
self.non_leap_year = first_non_leap_year(self.dowy_eom)
self.turnback_use = True
for k, v in json.load(open('cord/districts/%s_properties.json' % key)).items():
setattr(self, k, v)
    if scenario_file != 'baseline':
for k,v in json.load(open(scenario_file)).items():
setattr(self,k,v)
    #initialize crop acreages and et demands for crops
self.irrdemand = Crop(self.zone)
#initialize dictionary to hold different delivery types
self.deliveries = {}
self.contract_list_all = ['tableA', 'cvpdelta', 'exchange', 'cvc', 'friant1', 'friant2','kaweah', 'tule', 'kern','kings']
self.non_contract_delivery_list = ['recover_banked','inleiu_irrigation','inleiu_recharge','leiupumping','recharged','exchanged_GW','exchanged_SW','undelivered_trades']
for x in self.contract_list_all:
#normal contract deliveries
self.deliveries[x] = np.zeros(self.number_years)
#uncontrolled deliveries from contract
self.deliveries[x + '_flood'] = np.zeros(self.number_years)
# contract-specific recharge (same as 'recharged', but disaggregated)
self.deliveries[x + '_recharged'] = np.zeros(self.number_years)
    #deliveries from a groundwater bank (recorded by the banking partner receiving recovery water)
self.deliveries[x+ '_flood_irrigation'] = np.zeros(self.number_years)
    #deliveries from a groundwater bank (recorded by the banking partner receiving recovery water)
self.deliveries['recover_banked'] = np.zeros(self.number_years)
#deliveries to a in-leiu bank from a banking partner (recorded by the district acting as a bank)
self.deliveries['inleiu_irrigation'] = np.zeros(self.number_years)
self.deliveries['inleiu_recharge'] = np.zeros(self.number_years)
#deliveries from an in leiu bank to a banking partner (recorded by the district acting as a bank)
self.deliveries['leiupumping'] = np.zeros(self.number_years)
#water delivered from a contract to a recharge basin (direct or in leiu, recorded by the banking partner who owned the water)
self.deliveries['recharged'] = np.zeros(self.number_years)
    #deliveries made from a district's bank to a third-party district (the district receives a surface water 'paper' credit)
self.deliveries['exchanged_GW'] = np.zeros(self.number_years)
    #recorded when a district receives water from a bank owned by another district (the district gives a surface water 'paper' credit)
self.deliveries['exchanged_SW'] = np.zeros(self.number_years)
self.deliveries['undelivered_trades'] = np.zeros(self.number_years)
#set dictionaries to keep track of different 'color' water for each contract
self.current_balance = {}#contract water currently available in surface water storage
self.paper_balance = {}#balance (positive) or negative of paper trades made from groundwater banks
    self.turnback_pool = {}#water purchased from intra-contract markets (June 1st)
self.projected_supply = {}#projected annual allocation to each contract
self.carryover = {}#water 'carried over' in surface water storage from previous year's contract
self.recharge_carryover = {}#amount of water that the district wants to request contract deliveries for recharge
    self.delivery_carryover = {}#amount of water to deliver immediately because of surface storage spillage
self.contract_carryover_list = {}#maximum carryover storage on contract
self.carryover['tot'] = 0.0
self.projected_supply['tot'] = 0.0
#initialize values for all contracts in dictionaries
for y in self.contract_list_all:
self.current_balance[y] = 0.0
self.paper_balance[y] = 0.0
self.turnback_pool[y] = 0.0
self.projected_supply[y] = 0.0
self.carryover[y] = 0.0
self.recharge_carryover[y] = 0.0
self.delivery_carryover[y] = 0.0
self.contract_carryover_list[y] = 0.0
#initialize dictionaries to 'store' daily state variables (for export to csv)
self.daily_supplies = {}
supply_list = ['paper', 'carryover', 'allocation', 'delivery', 'flood_irrigation', 'leiu_applied', 'leiu_recharged', 'banked', 'pumping', 'leiu_delivered', 'recharge_delivery', 'recharge_uncontrolled']
for x in supply_list:
self.daily_supplies[x] = np.zeros(self.T)
#initialize dictionaries to 'store' annual change in state variables (for export to csv)
self.annual_supplies = {}
supply_list = ['delivery', 'flood_irrigation', 'leiu_applied','leiu_recharged', 'leiu_delivered', 'banked_accepted']
for x in supply_list:
self.annual_supplies[x] = np.zeros(self.number_years)
# hold all output
self.daily_supplies_full = {}
# delivery_list = ['tableA', 'cvpdelta', 'exchange', 'cvc', 'friant1', 'friant2','kaweah', 'tule', 'kern']
for x in self.contract_list_all:
self.daily_supplies_full[x + '_delivery'] = np.zeros(self.T)
self.daily_supplies_full[x + '_flood'] = np.zeros(self.T)
self.daily_supplies_full[x + '_flood_irrigation'] = np.zeros(self.T)
self.daily_supplies_full[x + '_recharged'] = np.zeros(self.T)
self.daily_supplies_full[x + '_projected'] = np.zeros(self.T)
self.daily_supplies_full[x + '_paper'] = np.zeros(self.T)
self.daily_supplies_full[x + '_carryover'] = np.zeros(self.T)
self.daily_supplies_full[x + '_turnback'] = np.zeros(self.T)
for x in self.non_contract_delivery_list:
self.daily_supplies_full[x] = np.zeros(self.T)
for x in ['recover_banked', 'inleiu_irrigation', 'inleiu_recharge', 'leiupumping', 'recharged', 'exchanged_GW', 'exchanged_SW', 'pumping', 'irr_demand']:
self.daily_supplies_full[x] = np.zeros(self.T)
# ['recover_banked', 'inleiu', 'leiupumping', 'recharged', 'exchanged_GW', 'exchanged_SW', 'undelivered_trades']
#Initialize demands
self.annualdemand = 0.0
self.dailydemand = 0.0
#recovery and pumping variables
#self.recovery_fraction = 0.5
self.annual_pumping = 0.0
self.use_recharge = 0.0
self.use_recovery = 0.0
self.extra_leiu_recovery = 0.0
self.max_recovery = 0.0
self.max_leiu_exchange = 0.0
self.direct_recovery_delivery = 0.0
    #for in-district recharge & counters (for keeping track of how long a basin has been continuously 'wet')
self.recharge_rate = self.in_district_direct_recharge*cfs_tafd
self.thismonthuse = 0
self.monthusecounter = 0
self.monthemptycounter = 0
self.current_recharge_storage = 0.0
self.private_fraction = 0.0
self.has_private = False
self.has_pesticide = False
self.has_pmp = False
#banking dictionaries to keep track of individual member use & accounts
if self.in_leiu_banking:
self.recovery_use = {}
self.inleiubanked = {}
self.contract_exchange = {}
self.leiu_additional_supplies = {}
self.bank_deliveries = {}
self.tot_leiu_recovery_use = 0.0
self.direct_storage = {}
self.bank_timeseries = {}
self.annual_timeseries = {}
self.recharge_rate_series = np.zeros(self.T)
self.use_recovery = 0.0
self.leiu_trade_cap = 0.5
for x in self.participant_list:
self.recovery_use[x] = 0.0
self.inleiubanked[x] = 0.0
self.leiu_additional_supplies[x] = 0.0
self.bank_deliveries[x] = 0.0
self.direct_storage[x] = 0.0
self.bank_timeseries[x] = np.zeros(self.T)
self.annual_timeseries[x] = np.zeros(self.T)
self.contract_exchange[x] = np.zeros(self.T)
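  # Illustrative sketch (not part of the original source): constructing a District requires a daily-indexed
  # DataFrame plus a cord/districts/<key>_properties.json file that supplies attributes such as zone,
  # in_leiu_banking and in_district_direct_recharge, e.g.
  #   df = pd.DataFrame(index=pd.date_range('1996-10-01', '2016-09-30', freq='D'))
  #   district = District(df, 'Example District', 'XYZ')    # 'XYZ' is a hypothetical district key
  #   district.deliveries['tableA']                    # per-wateryear totals, length number_years
  #   district.daily_supplies_full['tableA_delivery']  # daily timeseries, length district.T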
def object_equals(self, other):
##This function compares two instances of an object, returns True if all attributes are identical.
equality = {}
if (self.__dict__.keys() != other.__dict__.keys()):
return ('Different Attributes')
else:
differences = 0
for i in self.__dict__.keys():
if type(self.__getattribute__(i)) is dict:
equality[i] = True
for j in self.__getattribute__(i).keys():
if (type(self.__getattribute__(i)[j] == other.__getattribute__(i)[j]) is bool):
if ((self.__getattribute__(i)[j] == other.__getattribute__(i)[j]) == False):
equality[i] = False
differences += 1
else:
if ((self.__getattribute__(i)[j] == other.__getattribute__(i)[j]).all() == False):
equality[i] = False
differences += 1
else:
if (type(self.__getattribute__(i) == other.__getattribute__(i)) is bool):
equality[i] = (self.__getattribute__(i) == other.__getattribute__(i))
if equality[i] == False:
differences += 1
else:
equality[i] = (self.__getattribute__(i) == other.__getattribute__(i)).all()
if equality[i] == False:
differences += 1
return (differences == 0)
##################################SENSITIVITY ANALYSIS#################################################################
def set_sensitivity_factors(self, et_factor, acreage_factor, irr_eff_factor, recharge_decline_factor):
wyt_list = ['W', 'AN', 'BN', 'D', 'C']
for wyt in wyt_list:
for i,v in enumerate(self.crop_list):
self.acreage[wyt][i] = self.acreage[wyt][i]*acreage_factor
for monthloop in range(0,12):
self.irrdemand.etM[v][wyt][monthloop] = self.irrdemand.etM[v][wyt][monthloop]*et_factor
self.seepage = 1.0 + irr_eff_factor
for recharge_count in range(0, len(self.recharge_decline)):
self.recharge_decline[recharge_count] = 1.0 - recharge_decline_factor*(1.0 - self.recharge_decline[recharge_count])
#####################################################################################################################
##################################DEMAND CALCULATION#################################################################
#####################################################################################################################
def find_baseline_demands(self,wateryear):
self.monthlydemand = {}
wyt_list = ['W', 'AN', 'BN', 'D', 'C']
for wyt in wyt_list:
self.monthlydemand[wyt] = np.zeros(12)
for monthloop in range(0,12):
self.monthlydemand[wyt][monthloop] += self.urban_profile[monthloop]*self.MDD/self.days_in_month[self.non_leap_year][monthloop]
if self.has_pesticide:
for i,v in enumerate(self.acreage_by_year):
self.monthlydemand[wyt][monthloop] += max(self.irrdemand.etM[v][wyt][monthloop] - self.irrdemand.etM['precip'][wyt][monthloop],0.0)*(self.acreage_by_year[v][wateryear]-self.private_acreage[v][wateryear])/(12.0*self.days_in_month[self.non_leap_year][monthloop])
elif self.has_pmp:
for crop in self.pmp_acreage:
self.monthlydemand[wyt][monthloop] += max(self.irrdemand.etM[crop][wyt][monthloop] - self.irrdemand.etM['precip'][wyt][monthloop],0.0)*max(self.pmp_acreage[crop]-self.private_acreage[crop], 0.0)/(12.0*self.days_in_month[self.non_leap_year][monthloop])
else:
for i,v in enumerate(self.crop_list):
self.monthlydemand[wyt][monthloop] += max(self.irrdemand.etM[v][wyt][monthloop] - self.irrdemand.etM['precip'][wyt][monthloop],0.0)*(self.acreage[wyt][i]-self.private_acreage[v])/(12.0*self.days_in_month[self.non_leap_year][monthloop])
#self.monthlydemand[wyt][monthloop] += max(self.irrdemand.etM[v][wyt][monthloop] ,0.0)*self.acreage[wyt][i]/(12.0*self.days_in_month[self.non_leap_year][monthloop])
def calc_demand(self, wateryear, year, da, m, m1, wyt):
#from the monthlydemand dictionary (calculated at the beginning of each wateryear based on ag acreage and urban demands), calculate the daily demand and the remaining annual demand
monthday = self.days_in_month[year][m-1]
self.dailydemand = self.monthlydemand[wyt][m-1]*(monthday-da)/monthday + self.monthlydemand[wyt][m1-1]*da/monthday
if self.dailydemand < 0.0:
self.dailydemand = 0.0
    #calculate that day's 'starting' demand (demand is filled multiple times per day; if we only want to fill a certain fraction of that demand, based on projections of supply & demand for the rest of the year, we base that fraction on the day's total demand, not the demand left after other deliveries are made)
self.dailydemand_start = self.monthlydemand[wyt][m-1]*(monthday-da)/monthday + self.monthlydemand[wyt][m1-1]*da/monthday
#pro-rate this month's demand based on the day of the month when calculating remaining annual demand
self.annualdemand = max(self.monthlydemand[wyt][m-1]*(monthday-da), 0.0)
self.irrseasondemand = 0.0
for irrseason in range(6,9):
self.irrseasondemand += max(self.monthlydemand[wyt][irrseason]*self.days_in_month[year][irrseason], 0.0)
if m > 9:
for monthloop in range(m, 12):
self.annualdemand += max(self.monthlydemand[wyt][monthloop]*self.days_in_month[year][monthloop],0.0)
for monthloop in range(0,9):
self.annualdemand += max(self.monthlydemand[wyt][monthloop]*self.days_in_month[year+1][monthloop], 0.0)
else:
for monthloop in range(m, 9):
self.annualdemand += max(self.monthlydemand[wyt][monthloop]*self.days_in_month[year][monthloop], 0.0)
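  # Illustrative sketch (not part of the original source): with monthlydemand[wyt][m-1] = 2.0,
  # monthlydemand[wyt][m1-1] = 4.0, a 30-day month and da = 10 days already elapsed,
  #   dailydemand = 2.0*(30-10)/30 + 4.0*10/30 = 1.33 + 1.33 = 2.67
  # i.e. a day-weighted blend of the current and upcoming month's average daily demand.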
def find_pre_flood_demand(self, wyt, year):
#calculates an estimate for water use in the Oct-Dec period (for use in recharge_carryover calculations), happens Oct 1
self.pre_flood_demand = self.monthlydemand[wyt][9]*self.days_in_month[year][9] + self.monthlydemand[wyt][10]*self.days_in_month[year][10] + self.monthlydemand[wyt][11]*self.days_in_month[year][11]
def get_urban_demand(self, t, m, da, wateryear, year, sri, dowy, total_delta_pumping):
#this function finds demands for the 'branch pumping' urban nodes - Socal, South Bay, & Central Coast
#demand is equal to pumping of the main california aqueduct and into the branches that services these areas
    #cal aqueduct urban demand comes from pumping data, calculated separately
if self.has_private:
if self.has_pesticide:
frac_to_district = 1.0 - self.private_fraction[wateryear]
else:
frac_to_district = 1.0 - self.private_fraction
else:
frac_to_district = 1.0
self.dailydemand = self.pumping[t]/1000.0
self.dailydemand_start = self.dailydemand
##Keep track of ytd pumping to Cal Aqueduct Branches
self.ytd_pumping[wateryear] += self.dailydemand
sri_estimate = (sri*self.delivery_percent_coefficient[dowy][0] + self.delivery_percent_coefficient[dowy][1])*total_delta_pumping*frac_to_district
self.annualdemand = max(0.0, (self.annual_pumping[wateryear]*dowy + sri_estimate*(364.0 - dowy))/364.0 - self.ytd_pumping[wateryear])
if m == 10 and da == 1:
start_of_month = 0
cross_counter_y = 0
###Divide aqueduct branch pumping into 'monthly demands'
for monthloop in range(0,12):
monthcounter = monthloop + 9
if monthcounter > 11:
monthcounter -= 12
cross_counter_y = 1
start_next_month = self.dowy_eom[year+cross_counter_y][monthcounter] + 1
for wyt in ['W', 'AN', 'BN', 'D', 'C']:
self.monthlydemand[wyt][monthcounter] = np.mean(self.pumping[(t + start_of_month):(t + start_next_month)])/1000.0
start_of_month = start_next_month
def set_pmp_acreage(self, water_constraint_by_source, land_constraint, x0):
self.acreage_by_pmp_crop_type = self.irrdemand.find_pmp_acreage(water_constraint_by_source,land_constraint, x0)
self.pmp_acreage = {}
i = 0
for crop in self.irrdemand.crop_list:
district_crops = self.irrdemand.crop_keys[crop]
if district_crops in self.pmp_acreage:
self.pmp_acreage[district_crops] += self.acreage_by_pmp_crop_type[i]/1000.0
else:
self.pmp_acreage[district_crops] = self.acreage_by_pmp_crop_type[i]/1000.0
i += 1
#####################################################################################################################
#####################################################################################################################
#####################################################################################################################
##################################PROJECT CONTRACTS#################################################################
#####################################################################################################################
def update_balance(self, t, wateryear, water_available, projected_allocation, current_water, key, tot_carryover, balance_type):
###This function takes input from the contract class (water_available, projected_allocation, tot_carryover) to determine how much of their allocation remains
##water_available is the current contract storage in the reservoir *plus* all deliveries from the given year. The deliveries, paper trades, and turnback pool accounts for each district
##are used to get a running tally of the surface water that is currently available to them. (tot_carryover is subtracted from the current balance - districts only get access to *their*
    ##carryover storage - which is added to their individual current balance (self.carryover[key]))
##projected_allocation is the water that is forecasted to be available on each contract through the end of the water year *plus* water that has already been delivered on that contract
##individual deliveries are then subtracted from this total to determine the individual district's projected contract allocation remaining in that year
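    #illustrative sketch (not part of the original source): for a district with a 10% project_contract share,
    #no private land (frac_to_district = 1.0), water_available = 500, tot_carryover = 100, deliveries to date = 20,
    #carryover = 15, and zero paper/turnback balances:
    #  district_storage = (500 - 100)*0.10 - 20 + 15 = 35 (contract water physically in storage still available to this district this year)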
if self.has_private:
if self.has_pesticide:
frac_to_district = 1.0 - self.private_fraction[wateryear]
else:
frac_to_district = 1.0 - self.private_fraction
else:
frac_to_district = 1.0
if balance_type == 'contract':
#district_storage - district's water that is currently available (in storage at reservoir)
#(water_available - tot_carryover)*self.project_contract[key] - individual district share of the existing (in storage) contract balance, this includes contract water that has already been delivered to all contractors
#self.deliveries[key][wateryear] - individual district deliveries (how much of 'their' contract has already been delivered)
#self.carryover[key] - individual district share of contract carryover
      #paper_balance[key] - keeps track of 'paper' groundwater trades (negative means they have accepted GW deliveries in exchange for trading some of their water stored in reservoir, positive means they sent their banked GW to another district in exchange for SW storage)
#turnback_pool[key] - how much water was bought/sold on the turnback pool(negative is sold, positive is bought)
district_storage = (water_available-tot_carryover)*self.project_contract[key]*frac_to_district - self.deliveries[key][wateryear] + self.carryover[key] + self.paper_balance[key] + self.turnback_pool[key]
#annual allocation - remaining (undelivered) district share of expected total contract allocation
#same as above, but projected_allocation*self.project_contract[key] - individual share of expected total contract allocation, this includes contract water that has already been delivered to all contractors
annual_allocation = projected_allocation*self.project_contract[key]*frac_to_district - self.deliveries[key][wateryear] + self.carryover[key] + self.paper_balance[key] + self.turnback_pool[key]
storage_balance = current_water*self.project_contract[key]*frac_to_district + max(self.carryover[key] + self.paper_balance[key] + self.turnback_pool[key] - self.deliveries[key][wateryear], 0.0)
elif balance_type == 'right':
#same as above, but for contracts that are expressed as 'rights' instead of allocations
district_storage = (water_available-tot_carryover)*self.rights[key]['capacity']*frac_to_district - self.deliveries[key][wateryear] + self.carryover[key] + self.paper_balance[key] + self.turnback_pool[key]
annual_allocation = projected_allocation*self.rights[key]['capacity']*frac_to_district - self.deliveries[key][wateryear] + self.carryover[key] + self.paper_balance[key] + self.turnback_pool[key]
storage_balance = current_water*self.rights[key]['capacity']*frac_to_district + max(self.carryover[key] + self.paper_balance[key] + self.turnback_pool[key] - self.deliveries[key][wateryear], 0.0)
self.current_balance[key] = max(min(storage_balance,annual_allocation), 0.0)
self.projected_supply[key] = max(annual_allocation,0.0)
if key == 'xxx' or key == 'xxx':
if self.rights[key]['capacity'] > 0.0:
#print(wateryear, end = " ")
#print(t, end = " ")
#print(self.key, end = " ")
#print(key, end = " ")
#print("%.2f" % projected_allocation, end = " ")
#print("%.2f" % annual_allocation, end = " ")
#print("%.2f" % frac_to_district, end = " ")
#print("%.2f" % current_water, end = " ")
#print("%.2f" % tot_carryover, end = " ")
#print("%.2f" % self.deliveries[key][wateryear], end = " ")
#print("%.2f" % self.carryover[key], end = " ")
#print("%.2f" % self.paper_balance[key], end = " ")
#print("%.2f" % self.turnback_pool[key], end = " ")
#print("%.2f" % self.current_balance[key], end = " ")
#print("%.2f" % self.projected_supply[key], end = " ")
#print("%.2f" % self.annualdemand, end = " ")
#print("%.2f" % self.dailydemand, end = " ")
#print("%.2f" % self.recharge_carryover[key], end = " ")
print("%.2f" % self.use_recovery)
elif key == 'xxx' or key == 'xxx':
if self.project_contract[key] > 0.0:
#print(wateryear, end = " ")
#print(t, end = " ")
#print(self.key, end = " ")
#print(key, end = " ")
#print("%.2f" % projected_allocation, end = " ")
#print("%.2f" % annual_allocation, end = " ")
#print("%.2f" % frac_to_district, end = " ")
#print("%.2f" % current_water, end = " ")
#print("%.2f" % tot_carryover, end = " ")
#print("%.2f" % self.deliveries[key][wateryear], end = " ")
#print("%.2f" % self.carryover[key], end = " ")
#print("%.2f" % self.paper_balance[key], end = " ")
#print("%.2f" % self.turnback_pool[key], end = " ")
#print("%.2f" % self.current_balance[key], end = " ")
#print("%.2f" % self.projected_supply[key], end = " ")
#print("%.2f" % self.annualdemand, end = " ")
#print("%.2f" % self.dailydemand, end = " ")
#print("%.2f" % self.recharge_carryover[key], end = " ")
print("%.2f" % self.use_recovery)
return max(self.projected_supply[key] - self.annualdemand, 0.0) , max(self.carryover[key] - self.deliveries[key][wateryear], 0.0)
def calc_carryover(self, existing_balance, wateryear, balance_type, key):
#at the end of each wateryear, we tally up the full allocation to the contract, how much was used (and moved around in other balances - carryover, 'paper balance' and turnback_pools) to figure out how much each district can 'carryover' to the next year
if self.has_private:
if self.has_pesticide:
frac_to_district = 1.0 - self.private_fraction[wateryear]
else:
frac_to_district = 1.0 - self.private_fraction
else:
frac_to_district = 1.0
if balance_type == 'contract':
annual_allocation = existing_balance*self.project_contract[key]*frac_to_district - self.deliveries[key][wateryear] + self.carryover[key] + self.paper_balance[key] + self.turnback_pool[key]
max_carryover = self.contract_carryover_list[key]
elif balance_type == 'right':
annual_allocation = existing_balance*self.rights[key]['capacity']*frac_to_district - self.deliveries[key][wateryear] + self.carryover[key] + self.paper_balance[key] + self.turnback_pool[key]
max_carryover = self.contract_carryover_list[key]
reallocated_water = max(annual_allocation - max_carryover, 0.0)
self.carryover[key] = min(max_carryover, annual_allocation)
self.paper_balance[key] = 0.0
self.turnback_pool[key] = 0.0
return reallocated_water, self.carryover[key]
#####################################################################################################################
#####################################################################################################################
#####################################################################################################################
##################################RECHARGE/RECOVERY TRIGGERS#########################################################
#####################################################################################################################
def open_recovery(self,t, dowy, wateryear):
#this function determines if a district wants to recover banked water
#based on their demands and existing supplies
total_balance = 0.0
total_recovery = (366-dowy)*self.max_recovery + self.extra_leiu_recovery
existing_carryover = 0.0
for key in self.contract_list:
total_balance += self.projected_supply[key]
existing_carryover += max(self.carryover[key] - self.deliveries[key][wateryear], 0.0)
total_needs = self.annualdemand*self.seepage*self.surface_water_sa*self.recovery_fraction
if (total_balance + total_recovery) < total_needs:
if existing_carryover > 0.0:
self.use_recovery = 0.0
else:
self.use_recovery = 1.0
else:
self.use_recovery = 0.0
self.min_direct_recovery = max(self.annualdemand - total_balance,0.0)/(366-dowy)
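  # Illustrative sketch (not part of the original source): on dowy = 100 with max_recovery = 0.5 taf/day and
  # extra_leiu_recovery = 0, total_recovery = (366-100)*0.5 = 133 taf; if projected supplies plus that 133 taf
  # still fall short of annualdemand*seepage*surface_water_sa*recovery_fraction (and no carryover remains),
  # use_recovery is set to 1.0 and banked groundwater recovery is triggered for the year.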
def open_recharge(self,t,m,da,wateryear,year,numdays_fillup, numdays_fillup2, contract_carryover, key, wyt, reachable_turnouts, additional_carryover, contract_allocation):
#for a given contract owned by the district (key), how much recharge can they expect to be able to use
#before the reservoir associated w/ that contract fills to the point where it needs to begin spilling water
    #(numdays_fillup) - i.e., how much surface water storage can we keep before we start losing it
#self.recharge_carryover is the district variable that shows how much 'excess' allocation there is on a particular
#contract - i.e., how much of the allocation will not be able to be recharged before the reservoir spills
total_recharge = 0.0
total_recharge2 = 0.0
carryover_storage_proj = 0.0
spill_release_carryover = 0.0
is_reachable = 0
for x in reachable_turnouts:
for y in self.turnout_list:
if y == x:
is_reachable = 1
break
if is_reachable == 1:
break
if is_reachable == 0:
service_area_adjust = 0.0
else:
service_area_adjust = 1.0
adjusted_sw_sa = self.surface_water_sa*service_area_adjust
if numdays_fillup < 365.0:
#total_recharge_available = 0.0
#for y in self.contract_list:
#total_recharge_available += self.projected_supply[y]
#total_recharge_available -= self.annualdemand*adjusted_sw_sa*self.seepage
###Find projected recharge available to district
#if total_recharge_available > 0.0:
#total_recharge_capacity = (self.max_direct_recharge[0] + self.max_leiu_recharge[m])*(self.days_in_month[year][m]-da)
##calculate both direct & in leiu recharge available to the district through the end of this water year
#if m < 8:
#for future_month in range(m+1,9):
#total_recharge_capacity += self.max_direct_recharge[future_month - m]*self.days_in_month[year][future_month] + self.max_leiu_recharge[future_month]*self.days_in_month[year][future_month]
#elif m > 8:
#for future_month in range(m+1,12):
#total_recharge_capacity += self.max_direct_recharge[future_month - m]*self.days_in_month[year][future_month] + self.max_leiu_recharge[future_month]*self.days_in_month[year][future_month]
#for future_month in range(0,9):
#total_recharge_capacity += self.max_direct_recharge[future_month - m]*self.days_in_month[year+1][future_month] + self.max_leiu_recharge[future_month]*self.days_in_month[year][future_month]
#else:
#total_recharge_capacity = 0.0
#spill_release_carryover = max(total_recharge_available - total_recharge_capacity - additional_carryover, 0.0)
##how many days remain before the reservoir fills?
days_left = numdays_fillup
days_left2 = numdays_fillup2
#tabulate how much water can be recharged between now & reservoir fillup (current month)
this_month_recharge = (self.max_direct_recharge[0] + self.max_leiu_recharge[0])*min(self.days_in_month[year][m] - da,days_left)
this_month_recharge2 = (self.max_direct_recharge[0] + self.max_leiu_recharge[0])*min(self.days_in_month[year][m] - da,days_left2)
total_recharge += this_month_recharge
total_recharge2 += this_month_recharge2
#days before fillup remaining after current month
days_left -= (self.days_in_month[year][m] - da)
days_left2 -= (self.days_in_month[year][m] - da)
###if days_left remains positive (i.e., reservoir fills after the end of the current month)
###loop through future months to determine how much water can be recharged before reservoir fills
monthcounter = 0
monthcounter_loop = 0
next_year_counter = 0
while (monthcounter + monthcounter_loop) < 11 and days_left > 0.0:
monthcounter += 1
if (monthcounter + m) > 11:
monthcounter -= 12
monthcounter_loop = 12
next_year_counter = 1
# continue to tabulate how much water can be recharged between now & reservoir fillup (future months)
this_month_recharge = (self.max_direct_recharge[monthcounter+monthcounter_loop] + self.max_leiu_recharge[monthcounter+monthcounter_loop])*min(self.days_in_month[year+next_year_counter][m+monthcounter],days_left)
total_recharge += this_month_recharge
days_left -= self.days_in_month[year+next_year_counter][m+monthcounter]
monthcounter = 0
monthcounter_loop = 0
next_year_counter = 0
while (monthcounter + monthcounter_loop) < 11 and days_left2 > 0.0:
monthcounter += 1
if (monthcounter + m) > 11:
monthcounter -= 12
monthcounter_loop = 12
next_year_counter = 1
# continue to tabulate how much water can be recharged between now & reservoir fillup (future months)
this_month_recharge2 = (self.max_direct_recharge[monthcounter+monthcounter_loop] + self.max_leiu_recharge[monthcounter+monthcounter_loop])*min(self.days_in_month[year+next_year_counter][m+monthcounter],days_left2)
total_recharge2 += this_month_recharge2
days_left2 -= self.days_in_month[year+next_year_counter][m+monthcounter]
###Uses the projected supply calculation to determine when to recharge water. There are a number of conditions under which a
###district will recharge water. Projected allocations are compared to total demand, recharge capacity, and the probability of
###surface water storage spilling carryover water. If any of these conditions triggers recharge, the district will release water
##for recharge
#carryover_storage_proj = max(self.projected_supply[key] - self.annualdemand*adjusted_sw_sa - total_recharge*service_area_adjust - self.contract_carryover_list[key]*adjusted_sw_sa, 0.0)
spill_release_carryover = 0.0
for y in self.contract_list:
spill_release_carryover += max(self.projected_supply[y] - self.carryover_rights[y], 0.0)
spill_release_carryover -= (self.annualdemand*adjusted_sw_sa + total_recharge2*service_area_adjust + self.demand_days['lookahead'][key])
spill_release_carryover = max(spill_release_carryover, 0.0)
carryover_storage_proj = 0.0
for y in self.contract_list:
carryover_storage_proj += max(self.carryover[y] - self.deliveries[y][wateryear] - self.carryover_rights[y], 0.0)
carryover_storage_proj -= (total_recharge*service_area_adjust + self.demand_days['current'][key])
carryover_storage_proj = max(carryover_storage_proj, 0.0)
#carryover_release_proj = min(carryover_storage_proj, max(total_recharge_available - total_recharge_capacity,0.0))
#carryover_release_current = max(self.carryover[key] - self.deliveries[key][wateryear] - total_recharge_carryover, 0.0)
#if contract_carryover > 0.0:
#spill_release_carryover = max(self.carryover[key] - self.deliveries[key][wateryear] - total_recharge, 0.0)
#else:
#spill_release_carryover = max(self.projected_supply[key] - self.annualdemand*adjusted_sw_sa - total_recharge*service_area_adjust - self.contract_carryover_list[key]*adjusted_sw_sa, 0.0)
##The amount of recharge a district wants is then saved and sent to the canal class where it 'looks' for an available spot to recharge the water
#self.recharge_carryover[key] = max(carryover_release_proj, carryover_release_current, spill_release_carryover, spill_release_storage)
if spill_release_carryover > carryover_storage_proj:
total_available_for_recharge = 0.0
for y in self.contract_list:
total_available_for_recharge += max(self.projected_supply[y], 0.0)
if total_available_for_recharge > 0.0:
self.recharge_carryover[key] = max(spill_release_carryover, 0.0)*max(self.projected_supply[key], 0.0)/total_available_for_recharge
else:
self.recharge_carryover[key] = 0.0
else:
total_available_for_recharge = 0.0
for y in self.contract_list:
total_available_for_recharge += max(self.carryover[y] - self.deliveries[y][wateryear], 0.0)
if total_available_for_recharge > 0.0:
self.recharge_carryover[key] = max(carryover_storage_proj, 0.0)*max(self.carryover[key] - self.deliveries[key][wateryear], 0.0)/total_available_for_recharge
else:
self.recharge_carryover[key] = 0.0
#if contract_allocation == 0:
#self.recharge_carryover[key] = max(self.recharge_carryover[key], self.projected_supply[key] - total_recharge*service_area_adjust - self.demand_days['current'][key], 0.0)
if key == 'xxx' or key == 'xxx' or key == 'xxx' or key == 'xxx':
#print(carryover_storage_proj, end = " ")
#print(spill_release_carryover, end = " ")
#print(total_recharge, end = " ")
#print(self.demand_days['current'][key], end = " ")
#print(total_recharge2, end = " ")
#print(self.demand_days['lookahead'][key], end = " ")
#print(total_available_for_recharge, end = " ")
print(self.recharge_carryover[key])
##Similar conditions also calculate the amount of regular tableA deliveries for direct irrigation to request
else:
self.delivery_carryover[key] = 0.0
self.recharge_carryover[key] = 0.0
def get_urban_recovery_target(self, pumping, project_contract, wateryear, dowy, year, wyt, demand_days, t, start_month):
max_pumping_shortfall = 0.0
pumping_shortfall = 0.0
if self.has_private:
if self.has_pesticide:
frac_to_district = 1.0 - self.private_fraction[wateryear]
else:
frac_to_district = 1.0 - self.private_fraction
else:
frac_to_district = 1.0
monthcounter = start_month
daycounter = 0
tot_days = 0
if demand_days > 365.0:
max_pumping_shortfall = 9999.9
else:
while tot_days < demand_days:
pumping_shortfall += np.sum(self.pumping[(t-dowy+tot_days):(t-dowy+tot_days+min(demand_days -tot_days, 30))]/1000.0) - pumping['swp']['gains'][monthcounter]*project_contract*frac_to_district
tot_days += 30
monthcounter += 1
if monthcounter == 12:
monthcounter = 0
max_pumping_shortfall = max(pumping_shortfall, max_pumping_shortfall)
return max(max_pumping_shortfall, 0.0)
def set_turnback_pool(self, key, year):
##This function creates the 'turnback pool' (note: only for SWP contracts now, can be used for others)
##finding contractors with 'extra' contract water that they would like to sell, and contractors who would
##like to purchase that water.
self.turnback_sales = 0.0
self.turnback_purchases = 0.0
total_recharge_ability = 0.0
total_projected_supply = 0.0
for y in self.contract_list:
total_projected_supply += self.projected_supply[y]
for month_count in range(0, 4):
# total recharge Jun,Jul,Aug,Sep
total_recharge_ability += self.max_direct_recharge[month_count]*self.days_in_month[year][month_count + 5]
if total_projected_supply > 0.0:
contract_fraction = max(min(self.projected_supply[key]/total_projected_supply, 1.0), 0.0)
else:
contract_fraction = 0.0
#districts sell water if their projected contracts are greater than their remaining annual demand, plus their remaining recharge capacity in this water year, plus their recharge capacity in the next water year (through January)
self.turnback_sales = max(self.projected_supply[key] - self.carryover_rights[key] - (self.annualdemand + total_recharge_ability + self.pre_flood_demand)*contract_fraction, 0.0)
if self.in_leiu_banking:
self.turnback_purchases = 0.0
else:
##districts buy turnback water if their annual demands are greater than their projected supply plus their capacity to recover banked groundwater
self.turnback_purchases = max(self.annualdemand*contract_fraction + self.carryover_rights[key] - self.projected_supply[key] - self.max_recovery*122*contract_fraction, 0.0)
return self.turnback_sales, self.turnback_purchases
def make_turnback_purchases(self, turnback_sellers, turnback_buyers, key):
#once we know how much water is in the 'selling' pool and the 'buying' pool, we can determine the total turnback pool - min(buying,selling), then
    #determine what % of each request is filled (i.e., if the selling pool is only 1/2 of the buying pool, then buyers only get 1/2 of their request, or vice versa)
if min(turnback_sellers, turnback_buyers) > 0.0:
sellers_frac = -1*min(turnback_sellers, turnback_buyers)/turnback_sellers
buyers_frac = min(turnback_sellers, turnback_buyers)/turnback_buyers
total_projected_supply = 0.0
for y in self.contract_list:
        #the buying/selling fractions are applied to the same calculations above (about buying or selling needs), and then turnback pools are added to/subtracted from the district's contract
total_projected_supply += self.projected_supply[y]
if self.turnback_sales > 0.0:
self.turnback_pool[key] = max(self.turnback_sales, 0.0)*sellers_frac
elif self.turnback_purchases > 0.0:
if self.in_leiu_banking:
self.turnback_pool[key] = 0.0
else:
self.turnback_pool[key] = max(self.turnback_purchases, 0.0)*buyers_frac
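  # Illustrative sketch (not part of the original source): if total sell offers are 100 taf and total buy
  # requests are 50 taf, the pool is min(100, 50) = 50 taf, sellers_frac = -0.5 (each seller gives up half of
  # what it offered) and buyers_frac = 1.0 (each buyer receives its full request); the signed amounts are then
  # stored in each district's turnback_pool balance for the contract.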
#####################################################################################################################
#####################################################################################################################
#####################################################################################################################
##################################DETERMINE DELIVERIES ON CANAL######################################################
#####################################################################################################################
def find_node_demand(self,contract_list, search_type, partial_demand_toggle, toggle_recharge):
#this function is used to calculate the current demand at each 'district' node
access_mult = self.surface_water_sa*self.seepage#this accounts for water seepage & the total district area that can be reached by SW canals (seepage is >= 1.0; surface_water_sa <= 1.0)
total_projected_allocation = 0.0
private_add = 0.0
if self.has_private:
for xx in self.private_demand:
private_add += min(self.private_demand[xx], self.private_delivery[xx])
for y in contract_list:
total_projected_allocation += max(self.projected_supply[y.name], 0.0)#projected allocation
#percentage of demand filled in the day is equal to the total projected allocation as a percent of annual demand
#(i.e., if allocations are projected to be 1/2 of annual demand, then they try to fill 50% of daily irrigation demands with surface water
if self.annualdemand*access_mult > 0.0 and partial_demand_toggle == 1:
if self.must_fill == 1:
#pumping to urban branches of the Cal Aqueduct is 'must fill', (i.e., demand is always met)
total_demand_met = 1.0
else:
if self.annualdemand*access_mult > 0.0:
#total_demand_met = min(max(total_projected_allocation/(self.annualdemand*access_mult), 0.0), 1.0)
total_demand_met = 1.0
else:
total_demand_met = 0.0
#total_demand_met = min(max(total_projected_allocation/(self.annualdemand*access_mult), 0.0), 1.0)
#elif self.annualdemand*access_mult > 0.0:
#total_demand_met = 1.0 - min(max(total_projected_allocation/(self.annualdemand*access_mult), 0.0), 1.0)
else:
total_demand_met = 1.0
#self.dailydemand_start is the initial daily district demand (self.dailydemand is updated as deliveries are made) - we try to fill the total_demand_met fraction of dailydemand_start, or what remains of demand in self.dailydemand, whichever is smaller
if search_type == 'flood':
if self.annualdemand > 0.0 and total_projected_allocation > self.annualdemand:
#demand_constraint = (1.0 - min(total_projected_allocation/self.annualdemand, 1.0))*max(min(self.dailydemand_start*access_mult*total_demand_met, self.dailydemand*access_mult),0.0)
demand_constraint = max(min(self.dailydemand_start*access_mult*total_demand_met, self.dailydemand*access_mult),0.0)
else:
demand_constraint = max(min(self.dailydemand_start*access_mult*total_demand_met, self.dailydemand*access_mult),0.0)
else:
demand_constraint = max(min(self.dailydemand_start*access_mult*total_demand_met, self.dailydemand*access_mult),0.0)
#if we want to include recharge demands in the demand calculations, add available recharge space
if toggle_recharge == 1:
demand_constraint += max(self.in_district_storage - self.current_recharge_storage, 0.0)
return demand_constraint + private_add
def find_node_output(self):
#this function calculates the total recovery capacity that is contained in each district node
#(i.e. in leiu banks)
if self.in_leiu_banking:
current_recovery_use = 0.0
for x in self.recovery_use:
current_recovery_use += self.recovery_use[x]
output_constraint = self.leiu_recovery - current_recovery_use
else:
output_constraint = 0.0
return output_constraint
def find_leiu_output(self, contract_list, ownership, member_name, wateryear):
member_constraint = 0.0
total_contract = np.zeros(len(self.contract_list))
if self.in_leiu_banking:
bank_counter = 0
for bank_contracts in self.contract_list:
for exchange_contracts in contract_list:
if bank_contracts == exchange_contracts.name:
#member_constraint += max(min(self.current_balance[bank_contracts]*ownership, self.projected_supply[bank_contracts]*ownership, (self.projected_supply[bank_contracts] - self.paper_balance[bank_contracts])*ownership - self.contract_exchange[member_name][wateryear]), 0.0)
#total_contract[bank_counter] += max(min(self.current_balance[bank_contracts]*ownership, self.projected_supply[bank_contracts]*ownership, (self.projected_supply[bank_contracts] - self.paper_balance[bank_contracts])*ownership - self.contract_exchange[member_name][wateryear]), 0.0)
member_constraint += max(min(self.current_balance[bank_contracts], self.projected_supply[bank_contracts]), 0.0)
total_contract[bank_counter] += max(min(self.current_balance[bank_contracts], self.projected_supply[bank_contracts]), 0.0)
bank_counter += 1
if member_constraint > 0.0:
for bank_contract_counter in range(0, len(total_contract)):
total_contract[bank_contract_counter] = total_contract[bank_contract_counter]/member_constraint
return member_constraint, total_contract
def set_request_constraints(self, demand, search_type, contract_list, bank_space, bank_capacity, dowy, wateryear):
#this function is used to determine if a district node 'wants' to make a request
    #under the different usage types (flood, delivery, banking, or recovery) under a given contract
#(contract_list)
self.projected_supply['tot'] = 0.0
total_recharge = 0.0
for y in self.contract_list:
self.projected_supply['tot'] += self.projected_supply[y]
total_recharge += self.recharge_carryover[y]
#for flood deliveries, a district requests water if they don't have
#excess contract water that they don't think they can recharge (i.e. they don't purchase
    #flood water if they can't use all their contract water)
if search_type == "flood":
if self.projected_supply['tot'] > self.annualdemand:
return demand
else:
return demand
#for y in contract_list:
#tot_recharge += self.delivery_carryover[y.name]
#if tot_recharge <= 0.0:
#return demand
#else:
#return 0.0
#for normal irrigation deliveries, a district requests water if they have enough water currently
#in surface water storage under the given contract
if search_type == "delivery":
private_add = 0.0
if self.has_private:
for xx in self.private_demand:
private_add += min(self.private_demand[xx], self.private_delivery[xx])
total_current_balance = 0.0
total_projected_supply = 0.0
total_carryover = 0.0
friant_toggle = 0
delta_toggle = 0
for y in contract_list:
total_current_balance += max(self.current_balance[y.name], 0.0)
total_projected_supply += max(self.projected_supply[y.name], 0.0)
total_carryover += max(self.carryover[y.name] - self.deliveries[y.name][wateryear], 0.0)
if self.project_contract['cvpdelta'] > 0.0 or self.project_contract['exchange'] > 0.0:
delta_toggle = 1
if self.seasonal_connection == 1:
if self.must_fill == 1:
return max(min(demand, total_current_balance), 0.0) + private_add
elif total_carryover > 0.0 or total_projected_supply > self.annualdemand:
return max(min(demand, total_current_balance), 0.0) + private_add
elif delta_toggle == 1:
return max(min(demand, total_current_balance, total_projected_supply), 0.0) + private_add
#elif dowy < 273:
#if total_projected_supply > self.irrseasondemand:
#demand_fraction = min(max((total_projected_supply - self.irrseasondemand)/(self.annualdemand - self.irrseasondemand), 0.0), 1.0)
#return max(min(demand_fraction*demand,total_current_balance), 0.0) + private_add
#if self.annualdemand > 0.0:
#return max(min(demand*min(total_projected_supply/self.annualdemand, 1.0),total_current_balance), 0.0) + private_add
#else:
#return max(min(demand,total_current_balance), 0.0) + private_add
else:
conservative_estimate = max(min((dowy- 211.0)/(273.0 - 211.0), 1.0), 0.0)
if self.annualdemand > 0.0:
return max(min(demand*min(conservative_estimate*total_projected_supply/self.annualdemand, 1.0),total_current_balance), 0.0) + private_add
else:
return max(min(demand,total_current_balance), 0.0) + private_add
else:
return private_add
#for banking, a district requests water if they have enough contract water currently in surface water storage and they have 'excess' water for banking (calculated in self.open_recharge)
if search_type == "banking":
total_carryover_recharge = 0.0
total_current_balance = 0.0
for y in contract_list:
total_carryover_recharge += max(self.recharge_carryover[y.name], 0.0)
total_current_balance += max(self.current_balance[y.name], 0.0)
return min(total_carryover_recharge, total_current_balance, max(bank_capacity - bank_space, 0.0))
#for recovery, a district requests recovery water from a bank if they have contracts under the current contract being searched (i.e., so they aren't requesting water that will be sent to another district that can't make 'paper' trades with them) and if they have their 'recovery threshold' triggered (self.use_recovery, calculated in self.open_recovery)
if search_type == "recovery":
member_trades = 0
for member_contracts in self.contract_list:
for exchange_contracts in contract_list:
if member_contracts == exchange_contracts.name:
member_trades = 1
if member_trades == 1:
if self.use_recovery == 1.0:
total_request = min(max(self.dailydemand*self.surface_water_sa*self.seepage, 0.0), max(bank_space, 0.0))
else:
total_request = 0.0
else:
total_request = 0.0
return total_request
def set_demand_priority(self, priority_list, contract_list, demand, delivery, demand_constraint, search_type, contract_canal):
    #this function takes the calculated demand at each district node and classifies those demands by 'priority' - the priority classes and rules change for each delivery type
demand_dict = {}
#for flood deliveries, the priority structure is based on if you have a contract with the reservoir that is being spilled, if you have a turnout on a canal that is a 'priority canal' for the spilling reservoir, and then finally if you are not on a priority canal for spilling
if search_type == 'flood':
contractor_toggle = 0
priority_toggle = 0
for yy in priority_list:#canals that have 'priority' from the given reservoir
if yy.name == contract_canal:#current canal
priority_toggle = 1
if priority_toggle == 1:
for y in contract_list:#contracts that are being spilled (b/c they are held at the spilling reservoir)
for yx in self.contract_list:
if y.name == yx:
contractor_toggle = 1
if contractor_toggle == 1:
demand_dict['contractor'] = max(min(demand,delivery), 0.0)
demand_dict['alternate'] = min(delivery - max(min(demand,delivery),0.0),demand_constraint-demand_dict['contractor'])
demand_dict['turnout'] = 0.0
demand_dict['excess'] = 0.0
else:
demand_dict['contractor'] = 0.0
demand_dict['alternate'] = 0.0
demand_dict['turnout'] = max(min(demand,delivery), 0.0)
demand_dict['excess'] = 0.0
else:
demand_dict['contractor'] = 0.0
demand_dict['alternate'] = 0.0
demand_dict['turnout'] = 0.0
demand_dict['excess'] = max(min(demand,delivery), 0.0)
    #irrigation deliveries have only one type of priority (the contract that is currently being delivered)
elif search_type == 'delivery':
demand_dict[contract_canal] = max(min(demand,delivery), 0.0)
#in-leiu banks have demands that are either priority (capacity that the district has direct ownership over) or secondary (excess capacity that isn't being used by the owner)
elif search_type == 'banking':
priority_toggle = 0
for yy in priority_list:#canals that have 'priority' from the given reservoir
if yy.name == contract_canal:#current canal
priority_toggle = 1
if priority_toggle == 1:
demand_dict['priority'] = max(min(demand,delivery), 0.0)
demand_dict['secondary'] = min(delivery - max(min(demand,delivery),0.0),demand_constraint-demand_dict['priority'])
else:
demand_dict['priority'] = 0.0
demand_dict['secondary'] = max(min(delivery, demand_constraint), 0.0)
    #recovery uses the same priority structure as banking, but we use different names (initial & supplemental) to keep things straight
elif search_type == 'recovery':
if self.in_leiu_banking:
demand_dict['initial'] = max(min(demand,delivery), 0.0)
demand_dict['supplemental'] = min(delivery - max(min(demand,delivery), 0.0), demand_constraint - demand_dict['initial'])
else:
demand_dict['initial'] = 0.0
demand_dict['supplemental'] = 0.0
return demand_dict
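  # Illustrative sketch (not part of the original source): for a flood search on a priority canal where the
  # district holds the spilling contract, demand = 5.0 and delivery = 8.0 give
  #   {'contractor': 5.0, 'alternate': min(8.0 - 5.0, demand_constraint - 5.0), 'turnout': 0.0, 'excess': 0.0}
  # whereas on a canal without priority from the spilling reservoir the same request lands entirely in 'excess'.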
def find_leiu_priority_space(self, demand_constraint, num_members, member_name, toggle_recharge, search_type):
#this function finds how much 'priority' space in the recharge/recovery capacity is owned by a member (member_name) in a given in-leiu bank (i.e. this function is attached to the district that owns the bank - and the banking member is represented by 'member_name' input variable)
if search_type == "recovery":
priority_space = max(min(self.leiu_recovery*self.leiu_ownership[member_name] - self.recovery_use[member_name], demand_constraint), 0.0)
available_banked = self.inleiubanked[member_name]
return min(priority_space, available_banked)
else:
initial_capacity = self.dailydemand_start*self.surface_water_sa*self.seepage
if toggle_recharge == 1:
initial_capacity += self.in_district_storage
priority_space = max(min((self.leiu_ownership[member_name]*initial_capacity - self.bank_deliveries[member_name]), demand_constraint)/num_members, 0.0)
return priority_space
def set_deliveries(self, priorities,type_fractions,type_list,search_type,toggle_district_recharge,member_name, wateryear):
    #this function takes the deliveries, separated by priority, and updates the district's daily demand and/or recharge storage
final_deliveries = 0.0
total_direct_deliveries = 0.0
total_recharge_deliveries = 0.0
for zz in type_list:
total_deliveries = priorities[zz]*type_fractions[zz]
final_deliveries += total_deliveries
if self.has_private:
private = 0.0
for xx in self.private_demand:
private += min(self.private_demand[xx], self.private_delivery[xx])
if search_type == 'flood':
total_recharge_deliveries = min(max(final_deliveries - private, 0.0), self.in_district_storage - self.current_recharge_storage)
total_direct_deliveries = min(max(final_deliveries - private - total_recharge_deliveries, 0.0)/self.seepage, self.dailydemand*self.surface_water_sa)
else:
total_direct_deliveries = min(max(final_deliveries - private, 0.0)/self.seepage, self.dailydemand*self.surface_water_sa)
if toggle_district_recharge == 1:
total_recharge_deliveries = min(max((final_deliveries - private)/self.seepage - total_direct_deliveries, 0.0), self.in_district_storage - self.current_recharge_storage)
else:
total_recharge_deliveries = 0.0
self.dailydemand -= total_direct_deliveries
self.current_recharge_storage += total_recharge_deliveries
#final_deliveries += total_recharge_deliveries
else:
if search_type == 'flood':
total_recharge_deliveries = min(max(final_deliveries, 0.0), self.in_district_storage - self.current_recharge_storage)
total_direct_deliveries = min(max(final_deliveries - total_recharge_deliveries, 0.0)/self.seepage, self.dailydemand*self.surface_water_sa)
else:
total_direct_deliveries = min(max(final_deliveries, 0.0)/self.seepage, self.dailydemand*self.surface_water_sa)
if toggle_district_recharge == 1:
total_recharge_deliveries = min(max((final_deliveries)/self.seepage - total_direct_deliveries, 0.0), self.in_district_storage - self.current_recharge_storage)
else:
total_recharge_deliveries = 0.0
self.dailydemand -= total_direct_deliveries
self.current_recharge_storage += total_recharge_deliveries
return total_direct_deliveries, total_recharge_deliveries, final_deliveries - total_direct_deliveries - total_recharge_deliveries
#####################################################################################################################
#####################################################################################################################
#####################################################################################################################
  ##################################ADJUST ACCOUNTS AFTER DELIVERY####################################################
#####################################################################################################################
def give_paper_trade(self, trade_amount, contract_list, wateryear, district_name):
#this function accepts a delivery of recovered groundwater, and makes a 'paper'
#trade, giving up a surface water contract allocation (contract_list) to the district
#that owned the groundwater that was recovered
if self.seepage > 0.0:
total_alloc = 0.0
for y in contract_list:
total_alloc += self.projected_supply[y.name]
actual_delivery = min(trade_amount, total_alloc, self.dailydemand*self.seepage*self.surface_water_sa)
self.dailydemand -= actual_delivery/self.seepage
if total_alloc > 0.0:
for y in contract_list:
self.paper_balance[y.name] -= actual_delivery*self.projected_supply[y.name]/total_alloc
self.deliveries['exchanged_SW'][wateryear] += actual_delivery
return actual_delivery
def give_paper_exchange(self, trade_amount, contract_list, trade_frac, wateryear, district_name):
#this function accepts a delivery of recovered groundwater, and makes a 'paper'
#trade, giving up a surface water contract allocation (contract_list) to the district
#that owned the groundwater that was recovered
contract_counter = 0
for y in contract_list:
self.paper_balance[y] -= trade_amount*trade_frac[contract_counter]
contract_counter += 1
self.deliveries['exchanged_SW'][wateryear] += trade_amount
def get_paper_trade(self, trade_amount, contract_list, wateryear):
#this function takes a 'paper' credit on a contract and allocates it to a district
#the paper credit is in exchange for delivering recovered groundwater to another party (district)
total_alloc = 0.0
contract_frac = 0.0
for y in contract_list:
total_alloc += self.projected_supply[y.name]
if total_alloc > 0.0:
for y in contract_list:
self.paper_balance[y.name] += trade_amount*self.projected_supply[y.name]/total_alloc
else:
contract_frac = 1.0
for y in contract_list:
self.paper_balance[y.name] += trade_amount*contract_frac
contract_frac = 0.0
self.deliveries['exchanged_GW'][wateryear] += trade_amount
def get_paper_exchange(self, trade_amount, contract_list, trade_frac, wateryear):
#this function takes a 'paper' credit on a contract and allocates it to a district
#the paper credit is in exchange for delivering recovered groundwater to another party (district)
total_alloc = 0.0
contract_frac = 0.0
contract_counter = 0
for y in contract_list:
self.paper_balance[y] += trade_amount*trade_frac[contract_counter]
contract_counter += 1
self.deliveries['exchanged_GW'][wateryear] += trade_amount
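  #Note (illustrative): give_paper_exchange() and get_paper_exchange() are mirror operations - for a
  #trade of X split by trade_frac, the giving district's paper_balance falls by X*frac on each contract
  #(recorded as 'exchanged_SW') while the receiving district's rises by the same amounts (recorded as
  #'exchanged_GW'), so paper balances net to zero across the exchange.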
def record_direct_delivery(self, delivery, wateryear):
actual_delivery = min(delivery, self.dailydemand*self.seepage*self.surface_water_sa)
self.deliveries['recover_banked'][wateryear] += actual_delivery
self.dailydemand -= actual_delivery/(self.seepage*self.surface_water_sa)
self.direct_recovery_delivery = 0.0
return actual_delivery
def direct_delivery_bank(self, delivery, wateryear):
    #this function takes a delivery of recovered groundwater and applies it to irrigation demand in a district
#the recovered groundwater is delivered to the district that originally owned the water, so no 'paper' trade is needed
actual_delivery = min(delivery, self.dailydemand*self.seepage*self.surface_water_sa - self.direct_recovery_delivery)
#self.deliveries['recover_banked'][wateryear] += actual_delivery
self.direct_recovery_delivery += actual_delivery
#self.dailydemand -= actual_delivery/self.seepage*self.surface_water_sa
return actual_delivery
def adjust_accounts(self, direct_deliveries, recharge_deliveries, contract_list, search_type, wateryear):
#this function accepts water under a specific condition (flood, irrigation delivery, banking), and
#adjusts the proper accounting balances
total_carryover_recharge = 0.0
total_current_balance = 0.0
delivery_by_contract = {}
for y in contract_list:
if search_type == 'flood':
total_current_balance += 1.0
elif search_type == 'delivery':
total_current_balance += max(self.projected_supply[y.name], 0.0)
elif search_type == 'banking':
total_current_balance += max(self.recharge_carryover[y.name], 0.0)
elif search_type == 'recovery':
total_current_balance += max(self.current_balance[y.name], 0.0)
delivery_by_contract[y.name] = 0.0
flood_counter = 0
for y in contract_list:
#find the percentage of total deliveries that come from each contract
if search_type == 'flood':
if flood_counter == 0:
contract_deliveries = (direct_deliveries + recharge_deliveries)
flood_counter = 1
else:
contract_deliveries = 0.0
elif total_current_balance > 0.0:
if search_type == 'delivery':
contract_deliveries = (direct_deliveries + recharge_deliveries)*max(self.projected_supply[y.name], 0.0)/total_current_balance
elif search_type == 'banking':
contract_deliveries = (direct_deliveries + recharge_deliveries)*max(self.recharge_carryover[y.name], 0.0)/total_current_balance
elif search_type == 'recovery':
contract_deliveries = (direct_deliveries + recharge_deliveries)*max(self.current_balance[y.name], 0.0)/total_current_balance
else:
contract_deliveries = 0.0
delivery_by_contract[y.name] = contract_deliveries
#flood deliveries do not count against a district's contract allocation, so the deliveries are recorded as 'flood'
if search_type == "flood":
if contract_deliveries > 0.0:
self.deliveries[y.name + '_flood'][wateryear] += recharge_deliveries
self.deliveries[y.name + '_flood_irrigation'][wateryear] += direct_deliveries
else:
#irrigation/banking deliveries are recorded under the contract name so they are included in the
#contract balance calculations
#update the individual district accounts
self.deliveries[y.name][wateryear] += contract_deliveries
self.current_balance[y.name] -= contract_deliveries
if search_type == 'banking':
          #if deliveries are for banking, update banking accounts
self.deliveries['recharged'][wateryear] += contract_deliveries
self.deliveries[y.name+'_recharged'][wateryear] += contract_deliveries
self.recharge_carryover[y.name] -= min(contract_deliveries, self.recharge_carryover[y.name])
return delivery_by_contract
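  #Illustrative example (hypothetical numbers): for a 'delivery' search with projected supplies of 75
  #and 25 on two contracts and a total of 10 units delivered, the charges are 7.5 and 2.5 respectively
  #(pro-rata on projected supply); 'banking' and 'recovery' use the same pro-rata split on
  #recharge_carryover and current_balance, while 'flood' assigns everything to the first contract.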
def adjust_bank_accounts(self, member_name, direct_deliveries, recharge_deliveries, wateryear):
#when deliveries are made for banking, keep track of the member's individual accounts
self.bank_deliveries[member_name] += direct_deliveries + recharge_deliveries#keeps track of how much of the capacity is being used in the current timestep
self.deliveries['inleiu_irrigation'][wateryear] += direct_deliveries#if deliveries being made 'inleiu', then count as inleiu deliveries
self.deliveries['inleiu_recharge'][wateryear] += recharge_deliveries#if deliveries being made 'inleiu', then count as inleiu deliveries
self.inleiubanked[member_name] += (direct_deliveries + recharge_deliveries) * self.inleiuhaircut#this is the running account of the member's banking storage
def adjust_recovery(self, deliveries, member_name, wateryear):
#if recovery deliveries are made, adjust the banking accounts and account for the recovery capacity use
self.inleiubanked[member_name] -= deliveries#this is the running account of the member's banking storage
self.deliveries['leiupumping'][wateryear] += deliveries
self.recovery_use[member_name] += deliveries#keeps track of how much of the capacity is being used in the current timestep
def adjust_exchange(self, deliveries, member_name, wateryear):
#if recovery deliveries are made, adjust the banking accounts and account for the recovery capacity use
self.inleiubanked[member_name] -= deliveries#this is the running account of the member's banking storage
self.deliveries['leiupumping'][wateryear] += deliveries
self.contract_exchange[member_name][wateryear] += deliveries
def absorb_storage(self):
#water delivered to a bank as 'storage' (on the surface) is 'absorbed', clearing up storage space for the next timestep
    #also triggers self.thismonthuse, which keeps track of how many consecutive months a recharge bank is used (and the effect on the recharge rate of the spreading pool)
if self.in_leiu_banking:
if self.current_recharge_storage > self.recharge_rate*0.75:
self.thismonthuse = 1
if self.current_recharge_storage > 0.0:
absorb_fraction = min(self.in_district_direct_recharge*cfs_tafd/self.current_recharge_storage,1.0)
for x in self.participant_list:
self.current_recharge_storage -= self.current_recharge_storage*absorb_fraction
else:
if self.current_recharge_storage > self.recharge_rate*0.75:
self.thismonthuse = 1
if self.current_recharge_storage > 0.0:
absorb_fraction = min(self.recharge_rate/self.current_recharge_storage,1.0)
self.current_recharge_storage -= self.current_recharge_storage*absorb_fraction
self.current_recharge_storage = max(self.current_recharge_storage, 0.0)
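  #Illustrative example (hypothetical numbers): with current_recharge_storage = 10 and a direct (non-leiu)
  #recharge_rate = 4, absorb_fraction = min(4/10, 1.0) = 0.4, so 4 units are absorbed this timestep and 6
  #remain ponded for the next; the in-lieu branch uses in_district_direct_recharge*cfs_tafd as the capacity instead.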
#####################################################################################################################
#####################################################################################################################
#####################################################################################################################
##################################RECORD STATE VARIABLES###############################################################
#####################################################################################################################
def reset_recharge_recovery(self):
self.max_direct_recharge = np.zeros(12)
self.max_leiu_recharge = np.zeros(12)
self.total_banked_storage = 0.0
self.max_leiu_exchange = 0.0
def accounting_full(self, t, wateryear):
# keep track of all contract amounts
for x in self.contract_list_all:
self.daily_supplies_full[x + '_delivery'][t] = self.deliveries[x][wateryear]
self.daily_supplies_full[x + '_flood'][t] = self.deliveries[x + '_flood'][wateryear]
self.daily_supplies_full[x + '_flood_irrigation'][t] = self.deliveries[x + '_flood_irrigation'][wateryear]
self.daily_supplies_full[x + '_recharged'][t] = self.deliveries[x + '_recharged'][wateryear]
self.daily_supplies_full[x + '_projected'][t] = self.projected_supply[x]
self.daily_supplies_full[x + '_paper'][t] = self.paper_balance[x]
self.daily_supplies_full[x + '_carryover'][t] = self.carryover[x]
self.daily_supplies_full[x + '_turnback'][t] = self.turnback_pool[x]
for x in self.non_contract_delivery_list:
self.daily_supplies_full[x][t] = self.deliveries[x][wateryear]
self.daily_supplies_full['pumping'][t] = self.annual_private_pumping
self.daily_supplies_full['irr_demand'][t] = self.dailydemand_start
self.daily_supplies_full['recover_banked'][t] = self.deliveries['recover_banked'][wateryear]
self.daily_supplies_full['inleiu_irrigation'][t] = self.deliveries['inleiu_irrigation'][wateryear]
self.daily_supplies_full['inleiu_recharge'][t] = self.deliveries['inleiu_recharge'][wateryear]
self.daily_supplies_full['leiupumping'][t] = self.deliveries['leiupumping'][wateryear]
self.daily_supplies_full['recharged'][t] = self.deliveries['recharged'][wateryear]
self.daily_supplies_full['exchanged_GW'][t] = self.deliveries['exchanged_GW'][wateryear]
self.daily_supplies_full['exchanged_SW'][t] = self.deliveries['exchanged_SW'][wateryear]
def accounting(self,t, da, m, wateryear,key):
#takes delivery/allocation values and builds timeseries that show what water was used for (recharge, banking, irrigation etc...)
    #delivery/allocation data are set cumulatively - so that values will 'stack' in an area plot.
#Allocations are positive (stack above the x-axis in a plot)
self.daily_supplies['paper'][t] += self.projected_supply[key]
self.daily_supplies['carryover'][t] += max(self.projected_supply[key] - self.paper_balance[key], 0.0)
self.daily_supplies['allocation'][t] += max(self.projected_supply[key] - self.paper_balance[key] - self.carryover[key], 0.0)
#while deliveries are negative (stack below the x-axis in a plot) - the stacking adjustments come in self.accounting_banking_activity()
self.daily_supplies['delivery'][t] -= self.deliveries[key][wateryear]
self.daily_supplies['flood_irrigation'][t] -= (self.deliveries[key][wateryear] + self.deliveries[key + '_flood_irrigation'][wateryear])
self.daily_supplies['recharge_uncontrolled'][t] -= self.deliveries[key + '_flood'][wateryear]
if m == 9 and da == 30:
self.annual_supplies['delivery'][wateryear] += self.deliveries[key][wateryear]
self.annual_supplies['flood_irrigation'][wateryear] += self.deliveries[key + '_flood_irrigation'][wateryear]
self.deliveries['undelivered_trades'][wateryear] += max(self.paper_balance[key] - self.deliveries[key][wateryear], 0.0)
def accounting_banking_activity(self, t, da, m, wateryear):
    #this is an adjustment for 'delivery' (the delivery values are negative, so adding 'recharged' and 'exchanged_GW' removes them from the 'delivery' count) - we only want deliveries for irrigation, not for recharge
    #exchanged_GW is GW that has been pumped out of a bank and 'delivered' to another district. That district gets credit in the reservoir, and deliveries of SW from that reservoir are recorded as 'deliveries' - but we don't want to count that here
    #exchanged_SW is GW that has been pumped out of a bank, not owned by the district, and delivered to that district (i.e., the other side of the exchanged_GW in a GW exchange). This should technically count as an irrigation delivery from a contract
    #(we want to record that as a delivery here) but it doesn't get recorded that way upon delivery, so we add it back here when we are recording accounts (i.e. exchanged_GW and exchanged_SW are counters that help us square the records from GW exchanges)
self.daily_supplies['delivery'][t] += self.deliveries['recharged'][wateryear] + self.deliveries['exchanged_GW'][wateryear] - self.deliveries['exchanged_SW'][wateryear]
self.daily_supplies['flood_irrigation'][t] += self.deliveries['recharged'][wateryear] + self.deliveries['exchanged_GW'][wateryear] - self.deliveries['exchanged_SW'][wateryear]
    #leiu_applied are irrigation deliveries that come from the in-lieu banking district's banking partners (i.e., they use it, and record a 'balance' for whoever delivered it)
self.daily_supplies['leiu_applied'][t] += self.daily_supplies['flood_irrigation'][t] - self.deliveries['inleiu_irrigation'][wateryear]
self.daily_supplies['leiu_recharged'][t] += self.daily_supplies['leiu_applied'][t] - self.deliveries['inleiu_recharge'][wateryear]
#banked is uncontrolled (or flood) water that has been banked by a district (in-district)
self.daily_supplies['banked'][t] += self.daily_supplies['leiu_recharged'][t] + self.deliveries['exchanged_SW'][wateryear] - self.deliveries['exchanged_GW'][wateryear] - self.deliveries['recover_banked'][wateryear]
##pumping is private pumping for irrigation
self.daily_supplies['pumping'][t] += self.daily_supplies['banked'][t] - self.annual_private_pumping
##leiu_delivered is water from an in-leiu banking district that gets delivered to (recovered by) their banking partners
self.daily_supplies['leiu_delivered'][t] += self.daily_supplies['pumping'][t] - self.deliveries['leiupumping'][wateryear]
#recharge delivery is water recharged at a bank that comes from the district's contract amount (instead of flood/uncontrolled water)
self.daily_supplies['recharge_delivery'][t] += self.daily_supplies['leiu_delivered'][t] - self.deliveries['recharged'][wateryear]
#recharge uncontrolled is recharge water from flood flows (flood flows added in self.accounting() - this is only adjustment for stacked plot)
self.daily_supplies['recharge_uncontrolled'][t] += self.daily_supplies['recharge_delivery'][t]
if m == 9 and da == 30:
self.annual_supplies['delivery'][wateryear] += self.deliveries['exchanged_SW'][wateryear] - self.deliveries['recharged'][wateryear] - (self.deliveries['exchanged_GW'][wateryear] - self.deliveries['undelivered_trades'][wateryear])
recharged_recovery = 0.0
if self.annual_supplies['delivery'][wateryear] < 0.0:
recharged_recovery = self.annual_supplies['delivery'][wateryear]
self.annual_supplies['delivery'][wateryear] = 0.0
self.annual_supplies['banked_accepted'][wateryear] = self.deliveries['recover_banked'][wateryear] + (self.deliveries['exchanged_GW'][wateryear] - self.deliveries['undelivered_trades'][wateryear]) - self.deliveries['exchanged_SW'][wateryear] + recharged_recovery
self.annual_supplies['leiu_applied'][wateryear] = self.deliveries['inleiu_irrigation'][wateryear]
self.annual_supplies['leiu_recharged'][wateryear] = self.deliveries['inleiu_recharge'][wateryear]
self.annual_supplies['leiu_delivered'][wateryear] = self.deliveries['leiupumping'][wateryear]
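  #Illustrative note: the exchanged_GW/exchanged_SW adjustments above net out the paper side of GW
  #exchanges so that 'delivery' and 'banked_accepted' reflect water physically applied in this district
  #rather than the accounting credits traded with its partners.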
def accounting_leiubank(self,t, m, da, wateryear):
#takes banked storage (in in-leiu banks) and builds timeseries of member accounts
stacked_amount = 0.0
self.recharge_rate_series[t] = self.recharge_rate
for x in self.participant_list:
self.bank_timeseries[x][t] = self.inleiubanked[x] + stacked_amount
stacked_amount += self.inleiubanked[x]
if m == 9 and da == 30:
for x in self.participant_list:
sum_total = 0.0
for year_counter in range(0, wateryear):
sum_total += self.annual_timeseries[x][year_counter]
self.annual_timeseries[x][wateryear] = self.inleiubanked[x] - sum_total
def accounting_as_df(self, index):
    #write district accounts and deliveries into a data frame
df = pd.DataFrame()
for n in self.daily_supplies:
df['%s_%s' % (self.key,n)] = pd.Series(self.daily_supplies[n], index = index)
return df
def accounting_as_df_full(self, index):
    #write district accounts and deliveries into a data frame
df = pd.DataFrame()
for n in self.daily_supplies_full:
df['%s_%s' % (self.key,n)] = pd.Series(self.daily_supplies_full[n], index = index)
return df
def annual_results_as_df(self):
    #write annual district deliveries into a data frame
df = pd.DataFrame()
for n in self.annual_supplies:
df['%s_%s' % (self.key,n)] = pd.Series(self.annual_supplies[n])
return df
def bank_as_df(self, index):
    #write in-lieu banking accounts (plus bank recharge rates) into a dataframe
df = pd.DataFrame()
for n in self.participant_list:
df['%s_%s_leiu' % (self.key,n)] = pd.Series(self.bank_timeseries[n], index = index)
df['%s_rate' % self.key] = pd.Series(self.recharge_rate_series, index = index)
return df
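  #Illustrative note (column names are hypothetical): for a bank keyed 'XYZ' with participant 'ABC',
  #bank_as_df() produces columns 'XYZ_ABC_leiu' (stacked member balances) and 'XYZ_rate' (the bank's
  #recharge-rate timeseries), indexed by the simulation dates passed in.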
def annual_bank_as_df(self):
    #write annual banking changes into a data frame
df = | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 8 06:36:46 2020
@author: rolly
"""
import pandas
from validate_email import validate_email
from sqlalchemy import create_engine
import config
class Filetodb(object):
def __init__(self):
self.unregemail=[]
self.unregphone=[]
def getCSVSeparator(self,filename):
with open(filename) as f:
line = f.readline()
        separator = ','  #default to comma so an unrecognized delimiter does not leave 'separator' undefined
        separators=[',',';','|']
for sep in separators:
if line.find(sep) > 1:
separator = sep
break
return separator
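    #Illustrative usage (hypothetical first line): for a CSV whose header row is "name;email;phone",
    #getCSVSeparator() returns ';', which openFile() then passes to pandas.read_csv as sep.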
def openFile(self,filename):
try:
converters = {col: str for col in (6, 7)}
df = pandas.read_excel(filename, sheet_name=0, converters=converters)
except:
#separator detection
sep=self.getCSVSeparator(filename)
df = | pandas.read_csv(filename,sep=sep) | pandas.read_csv |
#!/usr/bin/env python
# coding: utf-8
"""
make_herbarium_2022_catalog_df.py
"""
#
# Description:
#
# Created On: Sunday Feb 27th, 2022
# Created By: <NAME>
# ### Key constants
# DATASETS_ROOT = "/media/data_cifs/projects/prj_fossils/data/processed_data/leavesdb-v1_1/images"
# EXTANT_ROOT = "/media/data_cifs/projects/prj_fossils/data/processed_data/leavesdb-v1_1/images/Extant_Leaves/original/full/jpg"
# GENERAL_ROOT = "/media/data_cifs/projects/prj_fossils/data/processed_data/leavesdb-v1_1/images/Fossil/General_Fossil/original/full/jpg"
# FLORISSANT_ROOT = "/media/data_cifs/projects/prj_fossils/data/processed_data/leavesdb-v1_1/images/Fossil/Florissant_Fossil/original/full/jpg"
# with open(os.path.join(HERBARIUM_ROOT, "train_metadata.json")) as fp:
# train_data = json.load(fp)
# with open(os.path.join(HERBARIUM_ROOT, "test_metadata.json")) as fp:
# test_data = json.load(fp)
# for k,v in train_data.items():
# print(k, f"| Total:{len(v)}")
# print("First:", v[0])
# print("Last:", v[-1])
# print("="*15+"\n")
# assert len(train_data["annotations"]) == len(train_data["images"])
import argparse
import os
import sys
from typing import *
import json
import pandas as pd
from pathlib import Path
import seaborn as sns
import matplotlib.pyplot as plt
from rich import print as pp
# HERBARIUM_ROOT_DEFAULT = "/media/data_cifs/projects/prj_fossils/data/raw_data/herbarium-2022-fgvc9_resize"
# from dotenv import load_dotenv
# load_dotenv()
import imutils
from imutils.big.split_catalog_utils import TRAIN_KEY, VAL_KEY, TEST_KEY
HERBARIUM_ROOT_DEFAULT = os.environ["HERBARIUM_ROOT_DEFAULT"]
CATALOG_DIR = os.environ["CATALOG_DIR"]
SPLITS_DIR = os.environ["SPLITS_DIR"]
def optimize_dtypes_train(df: pd.DataFrame) -> pd.DataFrame:
"""
Convert column dtypes to optimal type for herbarium train metadata df.
"""
# Reduce total df size by optimizing dtypes per column
cat_cols = ['genus_id', 'institution_id', 'category_id',
'scientificName', 'family', 'genus', 'species','Species',
'collectionCode', 'license', 'authors']
if "y" in df.columns:
cat_cols.append("y")
str_cols = ['image_id', 'file_name', 'path']
col_dtypes = {c:"category" for c in cat_cols if c in df.columns}
col_dtypes.update({c:"string" for c in str_cols})
# df = df.convert_dtypes()
df = df.astype(col_dtypes)
return df
def optimize_dtypes_test(df: pd.DataFrame) -> pd.DataFrame:
"""
Convert column dtypes to optimal type for herbarium test metadata df.
"""
dtypes_test = {'image_id':"string",
'file_name':"string",
'license':"category",
'path':"string"}
dtypes_test= {col:dtype for col, dtype in dtypes_test.items() if col in df.columns}
# Reduce total df size by optimizing dtypes per column
df = df.astype(dtypes_test)
return df
def read_train_df_from_csv(train_path,
nrows: Optional[int]=None,
index_col: int=0
) -> pd.DataFrame:
df = pd.read_csv(train_path, index_col=index_col, nrows=nrows)
df = optimize_dtypes_train(df)
return df
def read_test_df_from_csv(test_path,
nrows: Optional[int]=None,
index_col: int=0
) -> pd.DataFrame:
df = pd.read_csv(test_path, index_col=index_col, nrows=nrows)
df = optimize_dtypes_test(df)
return df
def read_all_from_csv(root_dir: str=None,
source_csv_paths: Optional[List[str]]=None,
subset_read_funcs: Union[Callable, Dict[str, Callable]]={
TRAIN_KEY: read_train_df_from_csv,
TEST_KEY: read_test_df_from_csv
},
return_dict: bool=False,
**kwargs) -> Tuple[pd.DataFrame]:
"""
Read the train_metadata.csv and test_metadata.csv files from `root_dir`
Note: This is prior to any train-val splits.
"""
if source_csv_paths is not None:
train_path, test_path = sorted(source_csv_paths)[::-1]
else:
train_path = Path(root_dir, "train_metadata.csv")
test_path = Path(root_dir, "test_metadata.csv")
if isinstance(subset_read_funcs, Callable):
train_df = subset_read_funcs(train_path)
test_df = subset_read_funcs(test_path)
else:
train_df = subset_read_funcs[TRAIN_KEY](train_path)
test_df = subset_read_funcs[TEST_KEY](test_path)
# train_df = read_train_df_from_csv(train_path)
# test_df = read_test_df_from_csv(test_path)
if return_dict:
return {
TRAIN_KEY: train_df,
TEST_KEY: test_df
}
return train_df, test_df
# read_train_df_from_csv,
# read_test_df_from_csv
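# Usage sketch (hedged - assumes train_metadata.csv and test_metadata.csv exist under the root):
# train_df, test_df = read_all_from_csv(root_dir=HERBARIUM_ROOT_DEFAULT)
# subsets = read_all_from_csv(root_dir=HERBARIUM_ROOT_DEFAULT, return_dict=True)
# train_df = subsets[TRAIN_KEY]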
###################################
###################################
class HerbariumMetadata:
TRAIN_KEYS = ['annotations', 'images', 'categories', 'genera', 'institutions', 'distances', 'license']
TEST_KEYS = ['image_id', 'file_name', 'license']
def __init__(self,
herbarium_root: str=HERBARIUM_ROOT_DEFAULT):
self.herbarium_root = herbarium_root
def get_train_df(self) -> pd.DataFrame:
metadata_path = Path(self.herbarium_root, "train_metadata.json")
with open(os.path.join(metadata_path)) as fp:
train_data = json.load(fp)
assert all([k in train_data.keys() for k in self.TRAIN_KEYS])
train_annotations = pd.DataFrame(train_data['annotations'])
train_categories = pd.DataFrame(train_data['categories']).set_index("category_id")
train_genera = pd.DataFrame(train_data['genera']).set_index("genus_id")
train_institutions = pd.DataFrame(train_data['institutions']).set_index("institution_id")
train_images = pd.DataFrame(train_data['images']).set_index("image_id")
df_train = pd.merge(train_annotations, train_images, how="left", right_index=True, left_on="image_id")
df_train = | pd.merge(df_train, train_categories, how="left", right_index=True, left_on="category_id") | pandas.merge |
import pandas as pd
import pdfplumber
import os
import sys
import numpy as np
import glob
import re
"""
Base path
"""
path = "/home/adam/downloads/ufo_files/"
"""
Output Headers
Each file contains the same columns but the headers are slightly different,
conform here
"""
headers = ['Date','Time','Town / Village','Area','Occupation (Where Relevant)','Description','Page','File','ReportYear']
"""
Some of the PDF pages do not have a bottom line at the end of the table on the first page,
so PDFPlumber can't identify the end of the table and takes the penultimate row as the final
row. Add the filename and the y-position of the line at the bottom of the page here.
"""
line = {"UFOReports2006WholeoftheUK.pdf":521,"UFOReports2004WholeoftheUK.pdf":552.875}
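#Hedged note: these y-coordinates are intended for pdfplumber's table settings (e.g. as an explicit
#horizontal line closing the first-page table); the exact table_settings keys are not shown in this
#excerpt, so how `line` is consumed downstream is an assumption here.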
"""
Some of the PDF documents repeat the header on each page. Add the document
title to this list to use the headers from the table on the first page and
drop the headers from the rest of the tables
"""
drop = ["ufo_report_2008.pdf",
"UFOReport2000.pdf",
"UFOReport1999.pdf",
"UFOReport1998.pdf"]
"""Function make_df()
Args:
page_number (int): The page number being processed.
table (list): List representing the table as prepared by PDFPlumber.
dropheader (bool): used to denote files that have repeated headers
Returns:
        df: pandas dataframe containing the table data. Page number is added, empty rows are removed, tabs and newlines are removed
"""
def make_df(page_number,table,dropheader=False):
df = | pd.DataFrame(table) | pandas.DataFrame |
# we borrow the code from https://github.com/sjy1203/GAMENet
import dill
import pandas as pd
import numpy as np
med_file = 'data/PRESCRIPTIONS.csv'
diag_file = 'data/DIAGNOSES_ICD.csv'
procedure_file = 'data/PROCEDURES_ICD.csv'
ndc2atc_file = 'data/ndc2atc_level4.csv'
cid_atc = 'data/drug-atc.csv'
ndc2rxnorm_file = 'data/ndc2rxnorm_mapping.txt'
voc_file = 'data/voc_final.pkl'
data_path = 'data/records_final.pkl'
# drug-drug interactions can be down https://www.dropbox.com/s/8os4pd2zmp2jemd/drug-DDI.csv?dl=0
ddi_file = 'data/drug-DDI.csv'
def process_procedure():
pro_pd = | pd.read_csv(procedure_file, dtype={'ICD9_CODE': 'category'}) | pandas.read_csv |
'''
Created on Jul 16, 2019
@author: vincentiusmartin
'''
import pandas as pd
from sitesfinder.imads import iMADS
from sitesfinder.imadsmodel import iMADSModel
from sitesfinder.plotcombiner import PlotCombiner
from sitesfinder.pbmescore import PBMEscore
from sitesfinder.sequence import Sequence
from sitesfinder.prediction.basepred import BasePrediction
import pickle
from cooperative import coopfilter
def main():
curdir = "/Users/vincentiusmartin/Research/chip2gcPBM/"
analysis_path = curdir + "result/ets1_HepG2/analysis_result/"
with open(analysis_path + "sitefiles_list.txt", 'r') as f:
sitelist = [line.strip() for line in f.readlines()]
slist = "/Users/vincentiusmartin/Research/chip2gcPBM/chip2probe/../result/ets1_A549/analysis_result/sites_within_d3_span50.tsv"
seqdf = pd.read_csv(slist, sep='\t')
escore_short_path = "/Users/vincentiusmartin/Research/chip2gcPBM/resources/escores/ets1_escores.txt"
escore_map_path = "/Users/vincentiusmartin/Research/chip2gcPBM/resources/escores/index_short_to_long.csv"
escore = PBMEscore(escore_short_path, escore_map_path)
es_preds = escore.predict_sequences(seqdf)
esplots = escore.plot(es_preds)
"""
modelcores = ["GGAA", "GGAT"]
modelpaths = ["/Users/vincentiusmartin/Research/chip2gcPBM/resources/imads_preds/models/ets1/ETS1_100nM_Bound_filtered_normalized_transformed_20bp_GGAA_1a2a3mer_format.model",
"/Users/vincentiusmartin/Research/chip2gcPBM/resources/imads_preds/models/ets1/ETS1_100nM_Bound_filtered_normalized_transformed_20bp_GGAT_1a2a3mer_format.model"]
models = [iMADSModel(modelpath, modelcore, 20, [1,2,3]) for modelpath, modelcore in zip(modelpaths, modelcores)]
ims = iMADS(models, 0.2128) # 0.2128 is for the ETS1 cutoff
ims_preds = ims.predict_sequences(seqdf)
imadsplots = ims.plot(ims_preds)
pc = PlotCombiner()
pc.plot_seq_combine([imadsplots,esplots], filepath="plot.pdf")
filtered_sites = {}
print("Site filtering...")
for key in es_preds:
bs = Sequence(es_preds[key],ims_preds[key])
if bs.site_count() == 2:
filtered_sites[key] = bs
#site_list = [{**{"key":site, "sequence":filtered_sites[site].sequence},**filtered_sites[site].get_sites_dict()} for site in filtered_sites]
#columns = ["key", "site_start_1", "site_start_2", "site_end_1", "site_end_2", "site_pos_1", "site_pos_2", "imads_score_1", "imads_score_2", "sequence"
"""
#pickle.dump(filtered_sites, open("test_fsites2.pickle","wb"))
filtered_sites = pickle.load(open("test_fsites2.pickle","rb"))
pc = PlotCombiner()
###
seqdict = {}
funcdict = {}
filtered_probes = []
# TODO: tmr look at 110,271
for key in filtered_sites:
#for key in ["sequence483"]:
# Visualization part
seqdict["%s-wt" % key] = filtered_sites[key].sequence
for idx,mut in enumerate([[0],[1],[0,1]]):
mutseq = filtered_sites[key].abolish_sites(mut,escore)
seqdict["%s-m%d" % (key,idx + 1)] = mutseq.sequence
funcdict["%s-m%d" % (key,idx + 1)] = mutseq.plot_functions
if coopfilter.filter_coopseq(seqdict["%s-wt"%key], seqdict["%s-m1"%key],
seqdict["%s-m2"%key], seqdict["%s-m3"%key],
filtered_sites[key].get_sites_dict(), escore):
filtered_probes.append({"key":key, "wt":seqdict["%s-wt"%key], "m1":seqdict["%s-m1"%key],
"m2":seqdict["%s-m2"%key], "m3":seqdict["%s-m3"%key]})
pp = escore.plot(escore.predict_sequences(seqdict),additional_functions=funcdict)
pc.plot_seq_combine([pp], filepath="plot-mut.pdf")
| pd.DataFrame(filtered_probes) | pandas.DataFrame |
"""Build industry sector ratios."""
import pandas as pd
# GWh/ktoe OR MWh/toe
toe_to_MWh = 11.630
eu28 = [
"FR",
"DE",
"GB",
"IT",
"ES",
"PL",
"SE",
"NL",
"BE",
"FI",
"DK",
"PT",
"RO",
"AT",
"BG",
"EE",
"GR",
"LV",
"CZ",
"HU",
"IE",
"SK",
"LT",
"HR",
"LU",
"SI",
"CY",
"MT",
]
sheet_names = {
"Iron and steel": "ISI",
"Chemicals Industry": "CHI",
"Non-metallic mineral products": "NMM",
"Pulp, paper and printing": "PPA",
"Food, beverages and tobacco": "FBT",
"Non Ferrous Metals": "NFM",
"Transport Equipment": "TRE",
"Machinery Equipment": "MAE",
"Textiles and leather": "TEL",
"Wood and wood products": "WWP",
"Other Industrial Sectors": "OIS",
}
index = [
"elec",
"coal",
"coke",
"biomass",
"methane",
"hydrogen",
"heat",
"naphtha",
"process emission",
"process emission from feedstock",
]
def load_idees_data(sector, country="EU28"):
suffixes = {"out": "", "fec": "_fec", "ued": "_ued", "emi": "_emi"}
sheets = {k: sheet_names[sector] + v for k, v in suffixes.items()}
def usecols(x):
return isinstance(x, str) or x == year
idees = pd.read_excel(
f"{snakemake.input.idees}/JRC-IDEES-2015_Industry_{country}.xlsx",
sheet_name=list(sheets.values()),
index_col=0,
header=0,
squeeze=True,
usecols=usecols,
)
for k, v in sheets.items():
idees[k] = idees.pop(v)
return idees
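# Usage sketch (hedged): `year` and `snakemake` are globals supplied elsewhere (e.g. by the Snakemake
# rule that runs this script); with those defined, a sector workbook is read as
# idees = load_idees_data("Iron and steel")   # dict-like with keys "out", "fec", "ued", "emi"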
def iron_and_steel():
# There are two different approaches to produce iron and steel:
# i.e., integrated steelworks and electric arc.
# Electric arc approach has higher efficiency and relies more on electricity.
# We assume that integrated steelworks will be replaced by electric arc entirely.
sector = "Iron and steel"
idees = load_idees_data(sector)
df = pd.DataFrame(index=index)
## Electric arc
sector = "Electric arc"
df[sector] = 0.0
s_fec = idees["fec"][51:57]
assert s_fec.index[0] == sector
sel = ["Lighting", "Air compressors", "Motor drives", "Fans and pumps"]
df.at["elec", sector] += s_fec[sel].sum()
df.at["heat", sector] += s_fec["Low enthalpy heat"]
subsector = "Steel: Smelters"
s_fec = idees["fec"][61:67]
s_ued = idees["ued"][61:67]
assert s_fec.index[0] == subsector
assert s_ued.index[0] == subsector
# efficiency changes due to transforming all the smelters into methane
key = "Natural gas (incl. biogas)"
eff_met = s_ued[key] / s_fec[key]
df.at["methane", sector] += s_ued[subsector] / eff_met
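    # Worked example (hypothetical numbers): if useful energy demand is 60 and final energy consumption
    # is 100 for natural gas, eff_met = 0.6, so a smelter subsector needing 30 of useful energy is booked
    # as 30 / 0.6 = 50 of methane final energy demand.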
subsector = "Steel: Electric arc"
s_fec = idees["fec"][67:68]
assert s_fec.index[0] == subsector
df.at["elec", sector] += s_fec[subsector]
subsector = "Steel: Furnaces, Refining and Rolling"
s_fec = idees["fec"][68:75]
s_ued = idees["ued"][68:75]
assert s_fec.index[0] == subsector
assert s_ued.index[0] == subsector
key = "Steel: Furnaces, Refining and Rolling - Electric"
eff = s_ued[key] / s_fec[key]
# assume fully electrified, other processes scaled by used energy
df.at["elec", sector] += s_ued[subsector] / eff
subsector = "Steel: Products finishing"
s_fec = idees["fec"][75:92]
s_ued = idees["ued"][75:92]
assert s_fec.index[0] == subsector
assert s_ued.index[0] == subsector
key = "Steel: Products finishing - Electric"
eff = s_ued[key] / s_fec[key]
# assume fully electrified
df.at["elec", sector] += s_ued[subsector] / eff
# Process emissions (per physical output)
s_emi = idees["emi"][51:93]
assert s_emi.index[0] == sector
s_out = idees["out"][7:8]
assert s_out.index[0] == sector
# tCO2/t material
df.loc["process emission", sector] += s_emi["Process emissions"] / s_out[sector]
# final energy consumption MWh/t material
sel = ["elec", "heat", "methane"]
df.loc[sel, sector] = df.loc[sel, sector] * toe_to_MWh / s_out[sector]
## DRI + Electric arc
# For primary route: DRI with H2 + EAF
sector = "DRI + Electric arc"
df[sector] = df["Electric arc"]
# add H2 consumption for DRI at 1.7 MWh H2 /ton steel
df.at["hydrogen", sector] = config["H2_DRI"]
# add electricity consumption in DRI shaft (0.322 MWh/tSl)
df.at["elec", sector] += config["elec_DRI"]
## Integrated steelworks
    # (could be used in combination with CCS)
# Assume existing fuels are kept, except for furnaces, refining, rolling, finishing
# Ignore 'derived gases' since these are top gases from furnaces
sector = "Integrated steelworks"
df[sector] = 0.0
s_fec = idees["fec"][3:9]
assert s_fec.index[0] == sector
sel = ["Lighting", "Air compressors", "Motor drives", "Fans and pumps"]
df.loc["elec", sector] += s_fec[sel].sum()
df.loc["heat", sector] += s_fec["Low enthalpy heat"]
subsector = "Steel: Sinter/Pellet making"
s_fec = idees["fec"][13:19]
s_ued = idees["ued"][13:19]
assert s_fec.index[0] == subsector
assert s_ued.index[0] == subsector
df.loc["elec", sector] += s_fec["Electricity"]
sel = ["Natural gas (incl. biogas)", "Residual fuel oil"]
df.loc["methane", sector] += s_fec[sel].sum()
df.loc["coal", sector] += s_fec["Solids"]
subsector = "Steel: Blast /Basic oxygen furnace"
s_fec = idees["fec"][19:25]
s_ued = idees["ued"][19:25]
assert s_fec.index[0] == subsector
assert s_ued.index[0] == subsector
sel = ["Natural gas (incl. biogas)", "Residual fuel oil"]
df.loc["methane", sector] += s_fec[sel].sum()
df.loc["coal", sector] += s_fec["Solids"]
df.loc["coke", sector] = s_fec["Coke"]
subsector = "Steel: Furnaces, Refining and Rolling"
s_fec = idees["fec"][25:32]
s_ued = idees["ued"][25:32]
assert s_fec.index[0] == subsector
assert s_ued.index[0] == subsector
key = "Steel: Furnaces, Refining and Rolling - Electric"
eff = s_ued[key] / s_fec[key]
# assume fully electrified, other processes scaled by used energy
df.loc["elec", sector] += s_ued[subsector] / eff
subsector = "Steel: Products finishing"
s_fec = idees["fec"][32:49]
s_ued = idees["ued"][32:49]
assert s_fec.index[0] == subsector
assert s_ued.index[0] == subsector
key = "Steel: Products finishing - Electric"
eff = s_ued[key] / s_fec[key]
# assume fully electrified
df.loc["elec", sector] += s_ued[subsector] / eff
# Process emissions (per physical output)
s_emi = idees["emi"][3:50]
assert s_emi.index[0] == sector
s_out = idees["out"][6:7]
assert s_out.index[0] == sector
# tCO2/t material
df.loc["process emission", sector] = s_emi["Process emissions"] / s_out[sector]
# final energy consumption MWh/t material
sel = ["elec", "heat", "methane", "coke", "coal"]
df.loc[sel, sector] = df.loc[sel, sector] * toe_to_MWh / s_out[sector]
return df
def chemicals_industry():
sector = "Chemicals Industry"
idees = load_idees_data(sector)
df = pd.DataFrame(index=index)
# Basic chemicals
sector = "Basic chemicals"
df[sector] = 0.0
s_fec = idees["fec"][3:9]
assert s_fec.index[0] == sector
sel = ["Lighting", "Air compressors", "Motor drives", "Fans and pumps"]
df.loc["elec", sector] += s_fec[sel].sum()
df.loc["heat", sector] += s_fec["Low enthalpy heat"]
subsector = "Chemicals: Feedstock (energy used as raw material)"
# There are Solids, Refinery gas, LPG, Diesel oil, Residual fuel oil,
# Other liquids, Naphtha, Natural gas for feedstock.
    # Naphtha represents 47%, methane 17%. LPG (18%), solids, refinery gas,
    # diesel oil, residual fuel oil and other liquids are assimilated to naphtha
s_fec = idees["fec"][13:22]
assert s_fec.index[0] == subsector
df.loc["naphtha", sector] += s_fec["Naphtha"]
df.loc["methane", sector] += s_fec["Natural gas"]
# LPG and other feedstock materials are assimilated to naphtha
    # since they will be produced through the Fischer-Tropsch process
sel = [
"Solids",
"Refinery gas",
"LPG",
"Diesel oil",
"Residual fuel oil",
"Other liquids",
]
df.loc["naphtha", sector] += s_fec[sel].sum()
subsector = "Chemicals: Steam processing"
# All the final energy consumption in the steam processing is
# converted to methane, since we need >1000 C temperatures here.
# The current efficiency of methane is assumed in the conversion.
s_fec = idees["fec"][22:33]
s_ued = idees["ued"][22:33]
assert s_fec.index[0] == subsector
assert s_ued.index[0] == subsector
# efficiency of natural gas
eff_ch4 = s_ued["Natural gas (incl. biogas)"] / s_fec["Natural gas (incl. biogas)"]
# replace all fec by methane
df.loc["methane", sector] += s_ued[subsector] / eff_ch4
subsector = "Chemicals: Furnaces"
s_fec = idees["fec"][33:41]
s_ued = idees["ued"][33:41]
assert s_fec.index[0] == subsector
assert s_ued.index[0] == subsector
# efficiency of electrification
key = "Chemicals: Furnaces - Electric"
eff_elec = s_ued[key] / s_fec[key]
# assume fully electrified
df.loc["elec", sector] += s_ued[subsector] / eff_elec
subsector = "Chemicals: Process cooling"
s_fec = idees["fec"][41:55]
s_ued = idees["ued"][41:55]
assert s_fec.index[0] == subsector
assert s_ued.index[0] == subsector
key = "Chemicals: Process cooling - Electric"
eff_elec = s_ued[key] / s_fec[key]
# assume fully electrified
df.loc["elec", sector] += s_ued[subsector] / eff_elec
subsector = "Chemicals: Generic electric process"
s_fec = idees["fec"][55:56]
assert s_fec.index[0] == subsector
df.loc["elec", sector] += s_fec[subsector]
# Process emissions
# Correct everything by subtracting 2015's ammonia demand and
# putting in ammonia demand for H2 and electricity separately
s_emi = idees["emi"][3:57]
assert s_emi.index[0] == sector
# convert from MtHVC/a to ktHVC/a
s_out = config["HVC_production_today"] * 1e3
# tCO2/t material
df.loc["process emission", sector] += (
s_emi["Process emissions"]
- config["petrochemical_process_emissions"] * 1e3
- config["NH3_process_emissions"] * 1e3
) / s_out
# emissions originating from feedstock, could be non-fossil origin
# tCO2/t material
df.loc["process emission from feedstock", sector] += (
config["petrochemical_process_emissions"] * 1e3
) / s_out
# convert from ktoe/a to GWh/a
sources = ["elec", "biomass", "methane", "hydrogen", "heat", "naphtha"]
df.loc[sources, sector] *= toe_to_MWh
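    # Worked conversion: 1 ktoe = 11.63 GWh, so e.g. 100 ktoe/a of electricity becomes 1163 GWh/a
    # before being normalized by production volume below.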
# subtract ammonia energy demand (in ktNH3/a)
ammonia = pd.read_csv(snakemake.input.ammonia_production, index_col=0)
ammonia_total = ammonia.loc[ammonia.index.intersection(eu28), str(year)].sum()
df.loc["methane", sector] -= ammonia_total * config["MWh_CH4_per_tNH3_SMR"]
df.loc["elec", sector] -= ammonia_total * config["MWh_elec_per_tNH3_SMR"]
# subtract chlorine demand
chlorine_total = config["chlorine_production_today"]
df.loc["hydrogen", sector] -= chlorine_total * config["MWh_H2_per_tCl"]
df.loc["elec", sector] -= chlorine_total * config["MWh_elec_per_tCl"]
# subtract methanol demand
methanol_total = config["methanol_production_today"]
df.loc["methane", sector] -= methanol_total * config["MWh_CH4_per_tMeOH"]
df.loc["elec", sector] -= methanol_total * config["MWh_elec_per_tMeOH"]
# MWh/t material
df.loc[sources, sector] = df.loc[sources, sector] / s_out
df.rename(columns={sector: "HVC"}, inplace=True)
# HVC mechanical recycling
sector = "HVC (mechanical recycling)"
df[sector] = 0.0
df.loc["elec", sector] = config["MWh_elec_per_tHVC_mechanical_recycling"]
# HVC chemical recycling
sector = "HVC (chemical recycling)"
df[sector] = 0.0
df.loc["elec", sector] = config["MWh_elec_per_tHVC_chemical_recycling"]
# Ammonia
sector = "Ammonia"
df[sector] = 0.0
df.loc["hydrogen", sector] = config["MWh_H2_per_tNH3_electrolysis"]
df.loc["elec", sector] = config["MWh_elec_per_tNH3_electrolysis"]
# Chlorine
sector = "Chlorine"
df[sector] = 0.0
df.loc["hydrogen", sector] = config["MWh_H2_per_tCl"]
df.loc["elec", sector] = config["MWh_elec_per_tCl"]
# Methanol
sector = "Methanol"
df[sector] = 0.0
df.loc["methane", sector] = config["MWh_CH4_per_tMeOH"]
df.loc["elec", sector] = config["MWh_elec_per_tMeOH"]
# Other chemicals
sector = "Other chemicals"
df[sector] = 0.0
s_fec = idees["fec"][58:64]
assert s_fec.index[0] == sector
sel = ["Lighting", "Air compressors", "Motor drives", "Fans and pumps"]
df.loc["elec", sector] += s_fec[sel].sum()
df.loc["heat", sector] += s_fec["Low enthalpy heat"]
subsector = "Chemicals: High enthalpy heat processing"
s_fec = idees["fec"][68:81]
s_ued = idees["ued"][68:81]
assert s_fec.index[0] == subsector
assert s_ued.index[0] == subsector
key = "High enthalpy heat processing - Electric (microwave)"
eff_elec = s_ued[key] / s_fec[key]
# assume fully electrified
df.loc["elec", sector] += s_ued[subsector] / eff_elec
subsector = "Chemicals: Furnaces"
s_fec = idees["fec"][81:89]
s_ued = idees["ued"][81:89]
assert s_fec.index[0] == subsector
assert s_ued.index[0] == subsector
key = "Chemicals: Furnaces - Electric"
eff_elec = s_ued[key] / s_fec[key]
# assume fully electrified
df.loc["elec", sector] += s_ued[subsector] / eff_elec
subsector = "Chemicals: Process cooling"
s_fec = idees["fec"][89:103]
s_ued = idees["ued"][89:103]
assert s_fec.index[0] == subsector
assert s_ued.index[0] == subsector
key = "Chemicals: Process cooling - Electric"
eff = s_ued[key] / s_fec[key]
# assume fully electrified
df.loc["elec", sector] += s_ued[subsector] / eff
subsector = "Chemicals: Generic electric process"
s_fec = idees["fec"][103:104]
assert s_fec.index[0] == subsector
df.loc["elec", sector] += s_fec[subsector]
# Process emissions
s_emi = idees["emi"][58:105]
s_out = idees["out"][9:10]
assert s_emi.index[0] == sector
assert sector in str(s_out.index)
# tCO2/t material
df.loc["process emission", sector] += s_emi["Process emissions"] / s_out.values
# MWh/t material
sources = ["elec", "biomass", "methane", "hydrogen", "heat", "naphtha"]
df.loc[sources, sector] = df.loc[sources, sector] * toe_to_MWh / s_out.values
# Pharmaceutical products
sector = "Pharmaceutical products etc."
df[sector] = 0.0
s_fec = idees["fec"][106:112]
assert s_fec.index[0] == sector
sel = ["Lighting", "Air compressors", "Motor drives", "Fans and pumps"]
df.loc["elec", sector] += s_fec[sel].sum()
df.loc["heat", sector] += s_fec["Low enthalpy heat"]
subsector = "Chemicals: High enthalpy heat processing"
s_fec = idees["fec"][116:129]
s_ued = idees["ued"][116:129]
assert s_fec.index[0] == subsector
assert s_ued.index[0] == subsector
key = "High enthalpy heat processing - Electric (microwave)"
eff_elec = s_ued[key] / s_fec[key]
# assume fully electrified
df.loc["elec", sector] += s_ued[subsector] / eff_elec
subsector = "Chemicals: Furnaces"
s_fec = idees["fec"][129:137]
s_ued = idees["ued"][129:137]
assert s_fec.index[0] == subsector
assert s_ued.index[0] == subsector
key = "Chemicals: Furnaces - Electric"
eff = s_ued[key] / s_fec[key]
# assume fully electrified
df.loc["elec", sector] += s_ued[subsector] / eff
subsector = "Chemicals: Process cooling"
s_fec = idees["fec"][137:151]
s_ued = idees["ued"][137:151]
assert s_fec.index[0] == subsector
assert s_ued.index[0] == subsector
key = "Chemicals: Process cooling - Electric"
eff_elec = s_ued[key] / s_fec[key]
# assume fully electrified
df.loc["elec", sector] += s_ued[subsector] / eff_elec
subsector = "Chemicals: Generic electric process"
s_fec = idees["fec"][151:152]
s_out = idees["out"][10:11]
assert s_fec.index[0] == subsector
assert sector in str(s_out.index)
df.loc["elec", sector] += s_fec[subsector]
# tCO2/t material
df.loc["process emission", sector] += 0.0
# MWh/t material
sources = ["elec", "biomass", "methane", "hydrogen", "heat", "naphtha"]
df.loc[sources, sector] = df.loc[sources, sector] * toe_to_MWh / s_out.values
return df
def nonmetalic_mineral_products():
# This includes cement, ceramic and glass production.
# This includes process emissions related to the fabrication of clinker.
sector = "Non-metallic mineral products"
idees = load_idees_data(sector)
df = pd.DataFrame(index=index)
# Cement
# This sector has process-emissions.
# Includes three subcategories:
# (a) Grinding, milling of raw material,
# (b) Pre-heating and pre-calcination,
# (c) clinker production (kilns),
# (d) Grinding, packaging.
# (b)+(c) represent 94% of fec. So (a) is joined to (b) and (d) is joined to (c).
    # Temperatures above 1400C are required for processing limestone and sand into clinker.
# Everything (except current electricity and heat consumption and existing biomass)
# is transformed into methane for high T.
sector = "Cement"
df[sector] = 0.0
s_fec = idees["fec"][3:25]
s_ued = idees["ued"][3:25]
assert s_fec.index[0] == sector
assert s_ued.index[0] == sector
sel = ["Lighting", "Air compressors", "Motor drives", "Fans and pumps"]
df.loc["elec", sector] += s_fec[sel].sum()
df.loc["heat", sector] += s_fec["Low enthalpy heat"]
# pre-processing: keep existing elec and biomass, rest to methane
df.loc["elec", sector] += s_fec["Cement: Grinding, milling of raw material"]
df.loc["biomass", sector] += s_fec["Biomass"]
df.loc["methane", sector] += (
s_fec["Cement: Pre-heating and pre-calcination"] - s_fec["Biomass"]
)
subsector = "Cement: Clinker production (kilns)"
s_fec = idees["fec"][34:43]
s_ued = idees["ued"][34:43]
assert s_fec.index[0] == subsector
assert s_ued.index[0] == subsector
df.loc["biomass", sector] += s_fec["Biomass"]
df.loc["methane", sector] += (
s_fec["Cement: Clinker production (kilns)"] - s_fec["Biomass"]
)
df.loc["elec", sector] += s_fec["Cement: Grinding, packaging"]
# Process emissions
# come from calcination of limestone to chemically reactive calcium oxide (lime).
# Calcium carbonate -> lime + CO2
# CaCO3 -> CaO + CO2
s_emi = idees["emi"][3:44]
assert s_emi.index[0] == sector
s_out = idees["out"][7:8]
assert sector in str(s_out.index)
# tCO2/t material
df.loc["process emission", sector] += s_emi["Process emissions"] / s_out.values
# MWh/t material
sources = ["elec", "biomass", "methane", "hydrogen", "heat", "naphtha"]
df.loc[sources, sector] = df.loc[sources, sector] * toe_to_MWh / s_out.values
# Ceramics & other NMM
# This sector has process emissions.
# Includes four subcategories:
# (a) Mixing of raw material,
# (b) Drying and sintering of raw material,
# (c) Primary production process,
# (d) Product finishing.
# (b) represents 65% of fec and (a) 4%. So (a) is joined to (b).
# Everything is electrified
sector = "Ceramics & other NMM"
df[sector] = 0.0
s_fec = idees["fec"][45:94]
s_ued = idees["ued"][45:94]
assert s_fec.index[0] == sector
assert s_ued.index[0] == sector
sel = ["Lighting", "Air compressors", "Motor drives", "Fans and pumps"]
df.loc["elec", sector] += s_fec[sel].sum()
df.loc["heat", sector] += s_fec["Low enthalpy heat"]
# Efficiency changes due to electrification
key = "Ceramics: Microwave drying and sintering"
eff_elec = s_ued[key] / s_fec[key]
sel = [
"Ceramics: Mixing of raw material",
"Ceramics: Drying and sintering of raw material",
]
df.loc["elec", sector] += s_ued[sel].sum() / eff_elec
key = "Ceramics: Electric kiln"
eff_elec = s_ued[key] / s_fec[key]
df.loc["elec", sector] += s_ued["Ceramics: Primary production process"] / eff_elec
key = "Ceramics: Electric furnace"
eff_elec = s_ued[key] / s_fec[key]
df.loc["elec", sector] += s_ued["Ceramics: Product finishing"] / eff_elec
s_emi = idees["emi"][45:94]
assert s_emi.index[0] == sector
s_out = idees["out"][8:9]
assert sector in str(s_out.index)
# tCO2/t material
df.loc["process emission", sector] += s_emi["Process emissions"] / s_out.values
# MWh/t material
sources = ["elec", "biomass", "methane", "hydrogen", "heat", "naphtha"]
df.loc[sources, sector] = df.loc[sources, sector] * toe_to_MWh / s_out.values
# Glass production
# This sector has process emissions.
# Includes four subcategories:
# (a) Melting tank
# (b) Forming
# (c) Annealing
# (d) Finishing processes.
# (a) represents 73%. (b), (d) are joined to (c).
# Everything is electrified.
sector = "Glass production"
df[sector] = 0.0
s_fec = idees["fec"][95:123]
s_ued = idees["ued"][95:123]
assert s_fec.index[0] == sector
assert s_ued.index[0] == sector
sel = ["Lighting", "Air compressors", "Motor drives", "Fans and pumps"]
df.loc["elec", sector] += s_fec[sel].sum()
df.loc["heat", sector] += s_fec["Low enthalpy heat"]
# Efficiency changes due to electrification
key = "Glass: Electric melting tank"
eff_elec = s_ued[key] / s_fec[key]
df.loc["elec", sector] += s_ued["Glass: Melting tank"] / eff_elec
key = "Glass: Annealing - electric"
eff_elec = s_ued[key] / s_fec[key]
sel = ["Glass: Forming", "Glass: Annealing", "Glass: Finishing processes"]
df.loc["elec", sector] += s_ued[sel].sum() / eff_elec
s_emi = idees["emi"][95:124]
assert s_emi.index[0] == sector
s_out = idees["out"][9:10]
assert sector in str(s_out.index)
# tCO2/t material
df.loc["process emission", sector] += s_emi["Process emissions"] / s_out.values
# MWh/t material
sources = ["elec", "biomass", "methane", "hydrogen", "heat", "naphtha"]
df.loc[sources, sector] = df.loc[sources, sector] * toe_to_MWh / s_out.values
return df
def pulp_paper_printing():
# Pulp, paper and printing can be completely electrified.
# There are no process emissions associated to this sector.
sector = "Pulp, paper and printing"
idees = load_idees_data(sector)
df = | pd.DataFrame(index=index) | pandas.DataFrame |
## Providing pathway (reactome) genes, gene-UniProtID mapping, gene-Ensembl ID mapping
import pandas as pd
import numpy as np
import scipy.stats as stat
from collections import defaultdict
import os, time
# REACTOME genes
def reactome_genes(): # provide in a dictionary
output = defaultdict(list)
output_list = []
f = open('../data/msigdb.v6.1.symbols.gmt.txt','r')
    for line in f:  #iterate over lines directly; file.xreadlines() only exists in Python 2
line = line.strip().split('\t')
if 'REACTOME' in line[0]:
reactome = line[0]
output_list.append(reactome)
for i in range(2, len(line)):
gene = line[i]
output[reactome].append(gene)
f.close()
return output
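# Usage sketch (hedged - the pathway key shown is illustrative):
# reactome = reactome_genes()
# genes_in_pathway = reactome['REACTOME_CELL_CYCLE']   # list of gene symbols for that pathway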
def reactome_genes_uniprot():
output, reactome = defaultdict(list), reactome_genes()
gene2uniprot = geneID2uniprot()
for pathway in reactome:
for gene in reactome[pathway]:
if gene in gene2uniprot:
uniprot = gene2uniprot[gene]
if not uniprot in output[pathway]:
output[pathway].append(uniprot)
return output
## gene annotation conversion utilities
def convert_geneList_to_uniprotList( input_geneList ):
output = []
for gene in input_geneList:
if gene in gene2uniprot:
output.append(gene2uniprot[gene])
return list(set(output))
def convert_uniprotList_to_geneList( input_uniprotList ):
output = []
for uniprot in input_uniprotList:
if uniprot in uniprot2gene:
output.append(uniprot2gene[uniprot])
return list(set(output))
## gene annotation
# ensembl gene annotation
def annotation():
geneID2ensembl, ensembl2geneID = defaultdict(set), {}
df = | pd.read_csv('../data/2017_07_31_biomart_protein_coding_genes.txt', sep='\t') | pandas.read_csv |
import copy
import itertools
import os
import numpy as np
import pandas as pd
from pathlib import Path
from sklearn.preprocessing import PowerTransformer
from scipy.stats import yeojohnson
from tqdm import tqdm
import tensorflow as tf
import warnings
warnings.simplefilter("ignore")
n_wavelengths = 55
n_timesteps = 300
class read_Ariel_dataset():
def __init__(self, noisy_path_train, noisy_path_test, params_path, start_read):
"""
For reading Ariel Dataset.
:param noisy_path_train: (str) The *relative path's parent directory* from the current
working directory to all noisy training files. For local files start with "./", for
colab files alternatively start with "/content/" (and "./" works fine).
        :param noisy_path_test: (str) The *relative path's parent directory* from the current
working directory to all noisy test files. For local files start with "./", for
colab files alternatively start with "/content/" (and "./" works fine).
:param params_path: (str) The *relative path's parent directory* from the current
working directory to all params files. For local files start with "./", for
colab files alternatively start with "/content/" (and "./" works fine).
:param start_read: (int) How many data points to replace at the beginning of the
file. Used for preprocessing of files by replacing values before start_read
with 1.0 to minimize impact of the drop valley.
"""
super().__init__()
self.noisy_path = noisy_path_train
self.noisy_path_test = noisy_path_test
self.params_path = params_path
self.start_read = start_read
# list all files in path(s).
        self.noisy_list = os.listdir(self.noisy_path)
self.noisy_list_test = os.listdir(self.noisy_path_test)
self.params_list = os.listdir(self.params_path)
# Grouped by AAAA:
self.group_noisy_list = self._group_list(self.noisy_list)
self.group_noisy_list_test = self._group_list(self.noisy_list_test)
self.group_params_list = self._group_list(self.params_list)
def _group_list_return(self):
"""
Only used for unit test purposes.
Return self.group_noisy_list and assert it is true.
"""
return self.group_noisy_list
def _choose_train_or_test(self, folder="noisy_train", batch_size=1):
"""Private function to choose train or test.
:param batch_size (int): The batch size to take. NotImplemented yet.
"""
if folder == "noisy_train":
path = self.noisy_path
files = self.noisy_list
elif folder == "noisy_test":
path = self.noisy_path_test
files = self.noisy_list_test
else:
raise FileNotFoundError("Not in the list (noisy_train, noisy_test). "
"Please input the choices in the list stated and try again.")
return path, files
def _len_noisy_list(self):
return len(self.noisy_list)
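    # Usage sketch (hedged - paths are placeholders):
    # reader = read_Ariel_dataset(noisy_path_train="./noisy_train", noisy_path_test="./noisy_test",
    #                             params_path="./params_train", start_read=30)
    # df = reader.unoptimized_read_noisy(folder="noisy_train")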
def unoptimized_read_noisy(self, folder="noisy_train", **kwargs):
"""
Read noisy files greedily, stacking them on the first axis.
        First axis is the time series axis, so with files of 300x55, reading
        3 files gives 900x55.
:param folder (str): Which folder to do baseline transition. Choices:
"noisy_train" (default), "noisy_test".
"""
path, files = self._choose_train_or_test(folder=folder, **kwargs)
predefined = pd.DataFrame()
for item in files:
# Concatenate filename and their parent folder.
relative_file_path = path + "/" + item
# Renaming the columns
names = [item[-14:-4] + f"_{i}" for i in range(n_timesteps)]
curr = | pd.read_csv(relative_file_path, delimiter="\t", skiprows=6, header=None) | pandas.read_csv |
import string
import pandas as pd
import numpy as np
import doctest
from texthero import preprocessing, stopwords
from . import PandasTestCase
"""
Test doctest
"""
def load_tests(loader, tests, ignore):
tests.addTests(doctest.DocTestSuite(preprocessing))
return tests
class TestPreprocessing(PandasTestCase):
"""
Test remove digits.
"""
def test_remove_digits_only_block(self):
s = pd.Series("remove block of digits 1234 h1n1")
s_true = pd.Series("remove block of digits h1n1")
self.assertEqual(preprocessing.remove_digits(s), s_true)
def test_remove_digits_any(self):
s = pd.Series("remove block of digits 1234 h1n1")
s_true = pd.Series("remove block of digits h n ")
self.assertEqual(preprocessing.remove_digits(s, only_blocks=False), s_true)
def test_remove_digits_brackets(self):
s = pd.Series("Digits in bracket (123 $) needs to be cleaned out")
s_true = pd.Series("Digits in bracket ( $) needs to be cleaned out")
self.assertEqual(preprocessing.remove_digits(s), s_true)
def test_remove_digits_start(self):
s = pd.Series("123 starting digits needs to be cleaned out")
s_true = pd.Series(" starting digits needs to be cleaned out")
self.assertEqual(preprocessing.remove_digits(s), s_true)
def test_remove_digits_end(self):
s = pd.Series("end digits needs to be cleaned out 123")
s_true = pd.Series("end digits needs to be cleaned out ")
self.assertEqual(preprocessing.remove_digits(s), s_true)
def test_remove_digits_phone(self):
s = pd.Series("+41 1234 5678")
s_true = pd.Series("+ ")
self.assertEqual(preprocessing.remove_digits(s), s_true)
def test_remove_digits_punctuation(self):
s = pd.Series(string.punctuation)
s_true = pd.Series(string.punctuation)
self.assertEqual(preprocessing.remove_digits(s), s_true)
"""
Test replace digits
"""
def test_replace_digits(self):
s = pd.Series("1234 falcon9")
s_true = pd.Series("X falcon9")
self.assertEqual(preprocessing.replace_digits(s, "X"), s_true)
def test_replace_digits_any(self):
s = pd.Series("1234 falcon9")
s_true = pd.Series("X falconX")
self.assertEqual(
preprocessing.replace_digits(s, "X", only_blocks=False), s_true
)
"""
Remove punctuation.
"""
def test_remove_punctuation(self):
s = pd.Series("Remove all! punctuation!! ()")
s_true = pd.Series(
"Remove all punctuation "
) # TODO maybe just remove space?
self.assertEqual(preprocessing.remove_punctuation(s), s_true)
"""
Remove diacritics.
"""
def test_remove_diacritics(self):
s = pd.Series("Montréal, über, 12.89, Mère, Françoise, noël, 889, اِس, اُس")
s_true = pd.Series("Montreal, uber, 12.89, Mere, Francoise, noel, 889, اس, اس")
self.assertEqual(preprocessing.remove_diacritics(s), s_true)
"""
Remove whitespace.
"""
def test_remove_whitespace(self):
s = pd.Series("hello world hello world ")
s_true = pd.Series("hello world hello world")
self.assertEqual(preprocessing.remove_whitespace(s), s_true)
"""
Test pipeline.
"""
def test_pipeline_stopwords(self):
s = pd.Series("E-I-E-I-O\nAnd on")
s_true = pd.Series("e-i-e-i-o\n ")
pipeline = [preprocessing.lowercase, preprocessing.remove_stopwords]
self.assertEqual(preprocessing.clean(s, pipeline=pipeline), s_true)
"""
Test stopwords.
"""
def test_remove_stopwords(self):
text = "i am quite intrigued"
text_default_preprocessed = " quite intrigued"
text_spacy_preprocessed = " intrigued"
text_custom_preprocessed = "i quite "
self.assertEqual(
preprocessing.remove_stopwords(pd.Series(text)),
pd.Series(text_default_preprocessed),
)
self.assertEqual(
preprocessing.remove_stopwords(
pd.Series(text), stopwords=stopwords.SPACY_EN
),
pd.Series(text_spacy_preprocessed),
)
self.assertEqual(
preprocessing.remove_stopwords(
pd.Series(text), stopwords={"am", "intrigued"}
),
pd.Series(text_custom_preprocessed),
)
def test_stopwords_are_set(self):
self.assertEqual(type(stopwords.DEFAULT), set)
self.assertEqual(type(stopwords.NLTK_EN), set)
self.assertEqual(type(stopwords.SPACY_EN), set)
"""
Test remove html tags
"""
def test_remove_html_tags(self):
s = pd.Series("<html>remove <br>html</br> tags<html> ")
s_true = pd.Series("remove html tags ")
self.assertEqual(preprocessing.remove_html_tags(s), s_true)
"""
Text tokenization
"""
def test_tokenize(self):
s = pd.Series("text to tokenize")
s_true = pd.Series([["text", "to", "tokenize"]])
self.assertEqual(preprocessing.tokenize(s), s_true)
def test_tokenize_multirows(self):
s = pd.Series(["first row", "second row"])
s_true = pd.Series([["first", "row"], ["second", "row"]])
self.assertEqual(preprocessing.tokenize(s), s_true)
def test_tokenize_split_punctuation(self):
s = pd.Series(["ready. set, go!"])
s_true = pd.Series([["ready", ".", "set", ",", "go", "!"]])
self.assertEqual(preprocessing.tokenize(s), s_true)
def test_tokenize_not_split_in_between_punctuation(self):
s = pd.Series(["don't say hello-world"])
s_true = pd.Series([["don't", "say", "hello-world"]])
self.assertEqual(preprocessing.tokenize(s), s_true)
"""
Has content
"""
def test_has_content(self):
s = pd.Series(["c", np.nan, "\t\n", " ", "", "has content", None])
s_true = pd.Series([True, False, False, False, False, True, False])
self.assertEqual(preprocessing.has_content(s), s_true)
"""
Test remove urls
"""
def test_remove_urls(self):
s = pd.Series("http://tests.com http://www.tests.com")
s_true = pd.Series(" ")
self.assertEqual(preprocessing.remove_urls(s), s_true)
def test_remove_urls_https(self):
s = pd.Series("https://tests.com https://www.tests.com")
s_true = pd.Series(" ")
self.assertEqual(preprocessing.remove_urls(s), s_true)
def test_remove_urls_multiline(self):
s = pd.Series("https://tests.com \n https://tests.com")
s_true = pd.Series(" \n ")
self.assertEqual(preprocessing.remove_urls(s), s_true)
"""
Remove brackets
"""
def test_remove_round_brackets(self):
s = pd.Series("Remove all (brackets)(){/}[]<>")
s_true = pd.Series("Remove all {/}[]<>")
self.assertEqual(preprocessing.remove_round_brackets(s), s_true)
def test_remove_curly_brackets(self):
s = pd.Series("Remove all (brackets)(){/}[]<> { }")
import argparse
import os
import re
import unicodedata
import Levenshtein
import pandas
import pandas as pd
import regex
from bz2file import BZ2File
from tqdm import tqdm
from wiktionary_de_parser import Parser
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--wiktionary', type=str, default='dewiktionary-20210701-pages-articles.xml.bz2')
parser.add_argument('--output', type=argparse.FileType('w'), default='wiktionary_relations.tsv')
args = parser.parse_args()
bz = BZ2File(args.wiktionary)
substantive = []
adjektive = []
verben = []
derivs = []
adj_targets = ["bar", "en", "erig", "ern", "fach", "frei", "haft", "ig", "isch",
"lich", "los", "mäßig", "sam", "sch"]
subst_targets = ["chen", "e", "ei", "el", "en", "er", "heit", "ien", "iker", "in", "keit", "lein", "ler", "ling",
"mut",
"nis", "rich", "sal", "schaft", "sel", "tum", "ung"]
with tqdm(unit='B', unit_scale=True, smoothing=0.05, total=os.path.getsize(args.wiktionary)) as pbar:
for record in Parser(bz):
pbar.update(bz._fp.tell() - pbar.n)
if not regex.fullmatch(r'\p{L}+', record['title']): continue
if 'langCode' not in record or record['langCode'] != 'de':
continue
if re.search(r'\|Adjektiv\|', record['wikitext']):
try:
target, lemma, base = process_deriv(record, adj_targets)
derivs.append(['adj_' + target, lemma, base])
except:
pass
if re.search(r'\|Substantiv\|', record['wikitext']):
try:
target, lemma, base = process_deriv(record, subst_targets)
derivs.append(['subst_' + target, lemma, base])
except:
pass
if 'flexion' in record.keys():
flexion = record["flexion"]
wortart = list(record["pos"].keys())[0]
if wortart == "Substantiv":
substantive.append(flexion)
if wortart == "Adjektiv":
adjektive.append(flexion)
if wortart == "Verb":
verben.append(flexion)
flexion["Infinitiv"] = record["title"]
print_verb_infl(verben, args.output)
print_adj_infl(adjektive, args.output)
print_subst_infl(substantive, args.output)
print_deriv(derivs, args.output)
def process_deriv(record, targets):
for t in targets:
lemma = record['title']
if not lemma.endswith(t): continue
herkunft = re.search(r'{{(Herkunft|Ableitung)}}[^{]*(\[\[Ableitung]][^{]*){{', record['wikitext'], re.MULTILINE)
if herkunft is None: continue
herkunft = herkunft.group(2).replace('\n', ' ')
if not re.search(r"''\[\[-" + t + "]]", herkunft): continue
base = [b[0] for b in regex.findall(r"''\[\[(\p{L}+)]](.,;)?''", herkunft)]
def check_prefix(a, b):
return unicodedata.normalize('NFD', a[0]).lower() != unicodedata.normalize('NFD', b[0]).lower()
if len(base) == 0: continue
if len(base) == 1:
candidate = base[0]
if not check_prefix(candidate, lemma): continue
return t, lemma, candidate
else:
# heuristic by closest levenshtein distance
distances = [(b, Levenshtein.distance(lemma.lower(), b.lower() + t)) for b in base if check_prefix(lemma, b)]
candidate, dist = min(distances, key=lambda x: x[1])
if dist <= 3:
return t, lemma, candidate
def print_subst_infl(substantive, out):
substantive = pd.DataFrame(substantive)
labels = dict([('Nominativ Singular', 'nom_sg'), ('Nominativ Plural', 'nom_pl'), ('Dativ Plural', 'dat_pl'),
('Genitiv Singular', 'gen_sg')])
substantive = substantive[labels.keys()].dropna().rename(columns=labels)
substantive.drop_duplicates(subset='nom_sg', keep=False, inplace=True)
substantive = substantive[
substantive.applymap(lambda x: len(x) >= 2 and regex.fullmatch(r'\w+', x) is not None).all(axis=1)]
for col in labels.values():
if col == 'nom_sg': continue
if col == 'nom_pl' or col == 'dat_pl':
selection = substantive[substantive['dat_pl'] != substantive['nom_pl']][['nom_sg', col]]
else:
selection = substantive[['nom_sg', col]]
selection = selection[selection.apply(lambda x: x == selection[col]).sum(axis=1) == 1].drop_duplicates()
for i, row in selection.iterrows():
print('infl_subst_' + col, row['nom_sg'], row[col], sep='\t', file=out)
def print_adj_infl(adjektive, out):
adjektive = pd.DataFrame(adjektive)
adjektive.drop_duplicates(subset='Positiv', keep=False, inplace=True)
for col in ['Komparativ', 'Superlativ']:
selection = adjektive[adjektive.apply(lambda x: x == adjektive[col]).sum(axis=1) == 1][
['Positiv', col]].drop_duplicates()
for i, row in selection.iterrows():
print('infl_adj_' + col.lower(), row['Positiv'], row[col], sep='\t', file=out)
def print_verb_infl(verben, out):
verben = pd.DataFrame(verben)
verben = verben.drop(verben[verben.Präsens_ich.isna()].index)
labels = dict([('Infinitiv', 'inf'), ('Präsens_ich', 'sg_1p_präsens'), ('Präsens_du', 'sg_2p_präsens'),
('Präteritum_ich', 'sg_1p_prät_indikativ'), ('Partizip II', 'partizip_perfekt'),
('Konjunktiv II_ich', 'sg_1p_prät_konjunktiv')])
verben = verben[labels.keys()].dropna().rename(columns=labels)
# verben.drop_duplicates(subset='inf', inplace=True)
verben = verben[verben.applymap(lambda x: len(x) >= 2 and regex.fullmatch(r'\w+', x) is not None).all(axis=1)]
for col in labels.values():
if col == 'inf': continue
selection = verben[verben.apply(lambda x: x == verben[col]).sum(axis=1) == 1][['inf', col]].drop_duplicates()
for i, row in selection.iterrows():
print('infl_verb_' + col, row['inf'], row[col], sep='\t', file=out)
def print_deriv(derivs, out):
df = pandas.DataFrame(derivs, columns=['derivation', 'base', 'lemma'])
""" This file process the IO for the Text similarity index processor """
import math
import os
import pandas as pd
from similarity_processor.similarity_core import get_cosine
from similarity_processor.similarity_core import text_to_vector
import similarity_processor.similarity_logging as cl
LOG = cl.get_logger()
def is_nan(value):
""" Function which identifies the "nan" on empty cells """
try:
return math.isnan(float(value))
except ValueError:
return False
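# Illustrative behaviour of is_nan (hypothetical values, not part of the original module):
#   is_nan("nan")  -> True   (float("nan") parses and math.isnan reports True)
#   is_nan("abc")  -> False  (float("abc") raises ValueError)
#   is_nan(5)      -> False  (a regular number is not NaN)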
class SimilarityIO:
""" This class is used for IO Processing the text similarity index processing tool.
User input file is fetched here, also intermediate file as well as
the final recommendation creating are tasks for this class """
def __init__(self, file_path, uniq_id, col_int, is_new_text, new_text=None):
"""constructor for SimilarityIO, which initializes the the input variables needed IO
processing """
LOG.info("\nSimilarity_UI \nValues passed:\n")
self.file_path = file_path
LOG.info("Path:%s", str(self.file_path))
self.uniq_id = uniq_id
LOG.info("\nUnique ID Column:%s", str(self.uniq_id))
self.col_int = col_int
LOG.info("\nColumns of Interest:%s", str(self.col_int))
self.is_new_text = is_new_text
self.new_text = new_text
LOG.info("\nNew_text:%s", str(self.new_text))
self.data_frame = None
self.uniq_header = None
def __get_file_path(self):
""" Function used for getting the file path where the results can be stored /
from where input is provided"""
return str(os.path.dirname(self.file_path))
def __get_file_name(self):
""" Function used for getting the input file name which can be further used for naming
the result """
file_path = self.file_path.split("/")
return os.path.splitext(file_path[-1])[0]
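# Illustrative example (hypothetical path): with self.file_path = "C:/data/input.xlsx",
# __get_file_name() returns "input", which is later used to name the result files.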
def __get_header(self):
""" Function to fetch the header from the inputfile read in the dataframe """
return list(self.data_frame.columns.values)
def __set_uniq_header(self):
""" Function to fetch the unique ID header """
sheet_headers = self.__get_header()
self.uniq_header = sheet_headers[int(self.uniq_id)]
def __get_duplicate_id(self):
""" Function which identifies if any duplicate ID present in the input file """
# List the duplicate ID
__duplicated_list = list(self.data_frame.duplicated())
__du_list = []
__data = [[]]
# Remove the 'NaN' in case of empty cell and filter only IDs
for key, item in enumerate(__duplicated_list):
if item:
__du_list.append(self.data_frame[self.uniq_header][key])
du_list = list(map(lambda x: 0 if is_nan(x) else x, __du_list))
__data = {'Duplicate ID': [nonzero for nonzero in du_list if nonzero != 0]}
# Create DataFrame and write
self.__write_xlsx(pd.DataFrame(__data), "Duplicate_ID")
def __read_to_panda_df(self):
""" Function which read the input data/xlsx to a pandas Data frame """
if not os.path.exists(self.file_path):
LOG.error("\nFile path is invalid")
return False
self.data_frame = pd.read_excel(self.file_path)
import numpy as np
import pandas as pd
import pandas_datareader.data as web
import datetime as dt
import requests
import io
import zipfile
import math
from kungfu.series import FinancialSeries
from kungfu.frame import FinancialDataFrame
def download_factor_data(freq='D'):
'''
Downloads factor data from Kenneth French's website and returns dataframe.
freq can be either 'D' (daily) or 'M' (monthly).
'''
if freq == 'D':
# Download Carhartt 4 Factors
factors_daily = web.DataReader("F-F_Research_Data_Factors_daily", "famafrench", start='1/1/1900')[0]
mom = web.DataReader('F-F_Momentum_Factor_daily', 'famafrench', start='1/1/1900')[0]
factors_daily = factors_daily.join(mom)
factors_daily = factors_daily[['Mkt-RF','SMB','HML','Mom ','RF']]
factors_daily.columns = ['Mkt-RF','SMB','HML','Mom','RF']
return FinancialDataFrame(factors_daily)
elif freq == 'M':
# Download Carhartt 4 Factors
factors_monthly = web.DataReader("F-F_Research_Data_Factors", "famafrench", start='1/1/1900')[0]
# mom = web.DataReader('F-F_Momentum_Factor', 'famafrench', start='1/1/1900')[0] #There seems to be a problem with the data file, fix if mom is needed
# factors_monthly = factors_monthly.join(mom)
# factors_monthly = factors_monthly[['Mkt-RF','SMB','HML','Mom ','RF']]
factors_monthly.index = factors_monthly.index.to_timestamp()
# factors_monthly.columns = ['Mkt-RF','SMB','HML','Mom','RF']
factors_monthly.columns = ['Mkt-RF','SMB','HML','RF']
factors_monthly.index = factors_monthly.index+pd.tseries.offsets.MonthEnd(0)
return FinancialDataFrame(factors_monthly)
def download_industry_data(freq='D', excessreturns = True):
'''
Downloads industry data from <NAME>'s website and returns dataframe.
freq can be either 'D' (daily) or 'M' (monthly).
excessreturns is a boolean to define if the the function should return excess returns.
'''
if freq == 'D':
# Download Fama/French 49 Industries
industries_daily = web.DataReader("49_Industry_Portfolios_Daily", "famafrench", start='1/1/1900')[0]
industries_daily[(industries_daily <= -99.99) | (industries_daily == -999)] = np.nan #set missing data to NaN
industries_daily = industries_daily.rename_axis('Industry', axis='columns')
if excessreturns is True:
factors_daily = web.DataReader("F-F_Research_Data_Factors_daily", "famafrench", start='1/1/1900')[0]
industries_daily = industries_daily.subtract(factors_daily['RF'], axis=0) #transform into excess returns
return industries_daily
elif freq == 'M':
# Download Fama/French 49 Industries
industries_monthly = web.DataReader("49_Industry_Portfolios", "famafrench", start='1/1/1900')[0]
industries_monthly[(industries_monthly <= -99.99) | (industries_monthly == -999)] = np.nan #set missing data to NaN
industries_monthly = industries_monthly.rename_axis('Industry', axis='columns')
industries_monthly.index = industries_monthly.index.to_timestamp()
if excessreturns is True:
factors_monthly = web.DataReader("F-F_Research_Data_Factors", "famafrench", start='1/1/1900')[0]
factors_monthly.index = factors_monthly.index.to_timestamp()
industries_monthly = industries_monthly.subtract(factors_monthly['RF'], axis=0) #transform into excess returns
industries_monthly.index = industries_monthly.index+pd.tseries.offsets.MonthEnd(0)
return industries_monthly
def download_25portfolios_data(freq='D', excessreturns = True):
'''
Downloads 25 portfolios data from Kenneth French's website and returns dataframe.
freq can be either 'D' (daily) or 'M' (monthly).
excessreturns is a boolean to define if the the function should return excess returns.
'''
if freq == 'D':
# Download Fama/French 25 portfolios
portfolios_daily = web.DataReader("25_Portfolios_5x5_Daily_CSV", "famafrench", start='1/1/1900')[0]
portfolios_daily[(portfolios_daily <= -99.99) | (portfolios_daily == -999)] = np.nan #set missing data to NaN
if excessreturns is True:
factors_daily = web.DataReader("F-F_Research_Data_Factors_daily", "famafrench", start='1/1/1900')[0]
portfolios_daily = portfolios_daily.subtract(factors_daily['RF'], axis=0) #transform into excess returns
return portfolios_daily
elif freq == 'M':
# Download Fama/French 25 portfolios
portfolios_monthly = web.DataReader("25_Portfolios_5x5_CSV", "famafrench", start='1/1/1900')[0]
portfolios_monthly[(portfolios_monthly <= -99.99) | (portfolios_monthly == -999)] = np.nan #set missing data to NaN
portfolios_monthly.index = portfolios_monthly.index.to_timestamp()
if excessreturns is True:
factors_monthly = web.DataReader("F-F_Research_Data_Factors", "famafrench", start='1/1/1900')[0]
factors_monthly.index = factors_monthly.index.to_timestamp()
portfolios_monthly = portfolios_monthly.subtract(factors_monthly['RF'], axis=0) #transform into excess returns
return portfolios_monthly
def download_recessions_data(freq='M', startdate='1/1/1900', enddate=dt.datetime.today()):
'''
Downloads NBER recessions from FRED and returns series.
freq can be either 'D' (daily) or 'M' (monthly).
startdate and enddate define the length of the timeseries.
'''
USREC_monthly = web.DataReader('USREC', 'fred',start = startdate, end=enddate)
if freq == 'M':
return USREC_monthly
if freq == 'D':
first_day = USREC_monthly.index.min() - pd.DateOffset(day=1)
last_day = USREC_monthly.index.max() + pd.DateOffset(day=31)
dayindex = pd.date_range(first_day, last_day, freq='D')
dayindex.name = 'DATE'
USREC_daily = USREC_monthly.reindex(dayindex, method='ffill')
return USREC_daily
def download_jpy_usd_data():
'''
Downloads USD/JPY exchange rate data from FRED and returns series.
'''
jpy = web.DataReader('DEXJPUS', 'fred', start = '1900-01-01')
return jpy
def download_cad_usd_data():
'''
Downloads USD/CAD exchange rate data from FRED and returns series.
'''
cad = web.DataReader('DEXCAUS', 'fred', start = '1900-01-01')
return cad
def download_vix_data():
'''
Downloads VIX index data from FRED and returns series.
'''
vix = web.DataReader('VIXCLS', 'fred', start = '1900-01-01')
return vix
def download_goyal_welch_svar():
'''
Downloads Goyal/Welch SVAR data from Amit Goyal's website and returns DataFrame.
'''
url = 'http://www.hec.unil.ch/agoyal/docs/PredictorData2017.xlsx'
sheet = pd.read_excel(url, sheet_name='Monthly')
dates = sheet['yyyymm']
SVAR = pd.DataFrame(sheet['svar'])
SVAR.index = [(dt.datetime(year = math.floor(date/100),month = date%100,day = 1)+dt.timedelta(days=32)).replace(day=1)-dt.timedelta(days=1) for date in dates]
return SVAR
def download_sadka_liquidity():
'''
Downloads Sadka liquidity factor data from <NAME>'s website and returns DataFrame.
'''
url = 'http://www2.bc.edu/ronnie-sadka/Sadka-LIQ-factors-1983-2012-WRDS.xlsx'
sheet = pd.read_excel(url, sheet_name='Sheet1')
dates = sheet['Date']
SadkaLIQ1 = pd.DataFrame(sheet['Fixed-Transitory'])
SadkaLIQ1.index = [(dt.datetime(year = math.floor(date/100),month = date%100,day = 1)+dt.timedelta(days=32)).replace(day=1)-dt.timedelta(days=1) for date in dates]
SadkaLIQ2 = pd.DataFrame(sheet['Variable-Permanent'])
SadkaLIQ2.index = [(dt.datetime(year = math.floor(date/100),month = date%100,day = 1)+dt.timedelta(days=32)).replace(day=1)-dt.timedelta(days=1) for date in dates]
return SadkaLIQ1, SadkaLIQ2
def download_manela_kelly_he_intermediary():
'''
Downloads Manela/Kelly/He intermediary risk factor data from Manela's website and returns DataFrame.
'''
url = 'http://apps.olin.wustl.edu/faculty/manela/hkm/intermediarycapitalrisk/He_Kelly_Manela_Factors.zip'
filename = 'He_Kelly_Manela_Factors_monthly.csv'
column1 = 'intermediary_capital_ratio'
column2 = 'intermediary_capital_risk_factor'
column3 = 'intermediary_value_weighted_investment_return'
column4 = 'intermediary_leverage_ratio_squared'
raw_data = pd.read_csv(zipfile.ZipFile(io.BytesIO(requests.get(url).content)).open(filename))
Intermediary = pd.DataFrame(raw_data[[column1,column2,column3,column4]])
# import dash
# import dash_core_components as dcc
# import dash_html_components as html
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
import math
from plotly.subplots import make_subplots
# Disable non-applicable Pandas warnings
pd.options.mode.chained_assignment = None # default='warn'
# Find and piece together assists by finding shots, then checking if the action directly before
# a shot was a pass (or cross, which is treated as a subset of pass in this dataset).
# If so, count it as an assist.
def find_assists(df):
assist_df = pd.DataFrame()
for index, row in df.iterrows():
if row["Type"] == "SHOT":
shooter = int(row["From"])
# Get contents of previous row in df and see if it's a reception or just a shot. If so then add to shot list
try:
previous_row = df.iloc[index - 1]
receiver = int(previous_row["To"])
passer = int(previous_row["From"])
except Exception as e:
print(e)
if previous_row["Type"] == "PASS" and shooter == receiver:
assist_x = previous_row["Start_X"]
assist_y = previous_row["Start_Y"]
receipt_x = row["Start_X"]
receipt_y = row["Start_Y"]
assist_list = [
passer,
shooter,
assist_x,
assist_y,
receipt_x,
receipt_y,
]
assist_df = assist_df.append(
pd.Series(
assist_list,
index=["From", "To", "Start_X", "Start_Y", "End_X", "End_Y"],
),
ignore_index=True,
)
assist_df["Type"] = "Assist to shot"
return assist_df
# Locate and build a dataframe of all set plays, ignoring kick-offs and throw-ins
def find_set_plays(df, mode):
sp_df = pd.DataFrame()
count = 0
for index, row in df.iterrows():
if (
row["Type"] == "SET PIECE"
and row["Subtype"] != "KICK OFF"
and row["Subtype"] != "THROW IN"
):
# Get contents of next row in df and see if it's a reception or just a shot. If so then add to shot list
try:
next_row = df.iloc[index + 1, :]
if next_row["Type"] == "PASS" or next_row["Type"] == "BALL LOST":
count = count + 1
receiver = next_row["To"]
passer = next_row["From"]
assist_x = next_row["Start_X"]
assist_y = next_row["Start_Y"]
receipt_x = next_row["End_X"]
receipt_y = next_row["End_Y"]
event_type = row["Subtype"]
sp_list = [
passer,
receiver,
assist_x,
assist_y,
receipt_x,
receipt_y,
event_type,
]
sp_df = sp_df.append(
pd.Series(
sp_list,
index=[
"From",
"To",
"Start_X",
"Start_Y",
"End_X",
"End_Y",
"Type",
],
),
ignore_index=True,
)
if mode == "progressive":
df = df.drop(index + 1)
except Exception as e:
print(e)
if mode == "progressive":
sp_df = df
sp_df.loc[sp_df.To.isnull(), "Type"] = "Incomplete"
return sp_df
# Make all actions graph from left to right. Depending on which team starts on the left hand side,
# make adjustments to both teams for the opposite half which does NOT go left to right
def left_justify_events(df, team_on_left):
df_half1 = df.loc[df["Period"] == 1]
df_half2 = df.loc[df["Period"] == 2]
if df.iloc[0]["Team"] == team_on_left:
# Reverse all the second half events
df_half2["Start_X"] = df_half2["Start_X"].map(lambda x: 1 - x)
df_half2["End_X"] = df_half2["End_X"].map(lambda x: 1 - x)
df_half2["Start_Y"] = df_half2["Start_Y"].map(lambda x: 1 - x)
df_half2["End_Y"] = df_half2["End_Y"].map(lambda x: 1 - x)
pass
else:
# Reverse all the first half events
df_half1["Start_X"] = df_half1["Start_X"].map(lambda x: 1 - x)
df_half1["End_X"] = df_half1["End_X"].map(lambda x: 1 - x)
df_half1["Start_Y"] = df_half1["Start_X"].map(lambda x: 1 - x)
df_half1["End_Y"] = df_half1["End_Y"].map(lambda x: 1 - x)
df = pd.concat([df_half1, df_half2])
return df
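# Illustrative example (hypothetical coordinates): an event at (Start_X, Start_Y) = (0.3, 0.8)
# in a mirrored half is remapped to (0.7, 0.2), so every action plots left-to-right.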
# Once number of clusters is auto-calculated, graph the clusters
def create_cluster_graph(df, num_clusters):
# creates a new trace for each set of data
fig = make_subplots(
rows=math.ceil(num_clusters / 2),
cols=2, # round up to nearest integer
subplot_titles=("Plot 1", "Plot 2", "Plot 3", "Plot 4"),
)
x = df["x"]
y = df["y"]
r = 1 # rows
c = 1 # columns
for index, row in df.iterrows():
fig.add_trace(
go.Scatter(x=x, y=y, marker=dict(color="#009BFF", size=8), opacity=0.8),
row=r,
col=c,
)
if c == 2:
c = 1
r = r + 1
else:
c = c + 1
return fig
# Auto-determine which clustering model fits best with the data and select that
def get_num_clusters(df, maxclusters):
# Get optimal number of clusters given the pattern
sil_score_max = -1 # this is the minimum possible score
for n_clusters in range(2, maxclusters):
model = KMeans(n_clusters=n_clusters, init="k-means++", max_iter=100, n_init=1)
labels = model.fit_predict(df)
sil_score = silhouette_score(df, labels)
# print("The average silhouette score for %i clusters is %0.2f" % (n_clusters, sil_score))
if sil_score > sil_score_max:
sil_score_max = sil_score
best_n_clusters = n_clusters
return best_n_clusters
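# Illustrative usage (hypothetical dataframe with "x"/"y" columns, matching create_cluster_graph):
#   best_k = get_num_clusters(df[["x", "y"]], maxclusters=10)
#   fig = create_cluster_graph(df, best_k)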
# Draw arrow annotations for passes, crosses, etc.
def drawAnnotations(df):
# Create annotations for all passes
annotations_list = []
for index, row in df.iterrows():
colour = "white"
opacity_setting = 0.3
# drop all rows that don't have a value in End_X because we don't want annotations for them
df.dropna(subset=["End_X"], inplace=True)
arrow = go.layout.Annotation(
dict(
x=row["End_X"],
y=row["End_Y"],
xref="x",
yref="y",
text="",
showarrow=True,
axref="x",
ayref="y",
ax=row["Start_X"],
ay=row["Start_Y"],
arrowhead=2,
arrowwidth=1.5,
arrowcolor=colour,
opacity=opacity_setting,
startstandoff=4,
)
)
annotations_list.append(arrow)
return annotations_list
def find_progressive_passes(df):
# df = df.loc[(df['End_X'] - df['location_x']) > 1000] # limit passes to those greater than 10M forward
df_own_half = df.loc[
(df["End_X"] < 0.5) & (df["Start_X"] < 0.5)
] # passes in own half
df_diff_half = df.loc[
(df["End_X"] > 0.5) & (df["Start_X"] < 0.5)
] # passes in different half
df_opp_half = df.loc[
(df["End_X"] > 0.5) & (df["Start_X"] > 0.5)
] # passes in opponent's half
goal_x = float(1)
goal_y = float(0.5)
# Passes in own half
if len(df_own_half) > 0:
# dist = math.hypot(x2 - x1, y2 - y1)
df_own_half["orig_distance_to_goal"] = df_own_half.apply(
lambda x: math.hypot(x["Start_X"] - goal_x, x["Start_Y"] - goal_y), axis=1
)
df_own_half["end_distance_to_goal"] = df_own_half.apply(
lambda x: math.hypot(x["End_X"] - goal_x, x["End_Y"] - goal_y), axis=1
)
df_own_half["distance"] = (
df_own_half["orig_distance_to_goal"] - df_own_half["end_distance_to_goal"]
)
df_own_half = df_own_half.loc[(df_own_half["distance"]) >= 0.30]
# Passes in both halves
if len(df_diff_half) > 0:
df_diff_half["orig_distance_to_goal"] = df_diff_half.apply(
lambda x: math.hypot(x["Start_X"] - goal_x, x["Start_Y"] - goal_y), axis=1
)
df_diff_half["end_distance_to_goal"] = df_diff_half.apply(
lambda x: math.hypot(x["End_X"] - goal_x, x["End_Y"] - goal_y), axis=1
)
df_diff_half["distance"] = (
df_diff_half["orig_distance_to_goal"] - df_diff_half["end_distance_to_goal"]
)
df_diff_half = df_diff_half.loc[(df_diff_half["distance"]) >= 0.15]
# Passes in opposition half
if len(df_opp_half) > 0:
df_opp_half["orig_distance_to_goal"] = df_opp_half.apply(
lambda x: math.hypot(x["Start_X"] - goal_x, x["Start_Y"] - goal_y), axis=1
)
df_opp_half["end_distance_to_goal"] = df_opp_half.apply(
lambda x: math.hypot(x["End_X"] - goal_x, x["End_Y"] - goal_y), axis=1
)
df_opp_half["distance"] = (
df_opp_half["orig_distance_to_goal"] - df_opp_half["end_distance_to_goal"]
)
df_opp_half = df_opp_half.loc[(df_opp_half["distance"]) >= 0.12]
df_list = [df_own_half, df_diff_half, df_opp_half] # List of your dataframes
df_combo = pd.concat(df_list)
return df_combo
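# Worked example (hypothetical pass): a pass from (0.30, 0.50) to (0.65, 0.50) starts 0.70 from
# the goal at (1, 0.5) and ends 0.35 away; the 0.35 gain clears the 0.15 threshold for passes
# that cross halves, so it is kept as progressive.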
# Main function - graph all football events which occur in a match
def plotEvents(eventType, filename, team, team_on_left):
# Read in event csv data file
data_file = "data/" + filename
events_df = pd.read_csv(data_file)
events_df = events_df.loc[events_df["Team"] == team]
events_df.reset_index(drop=True, inplace=True)
# change coordinates to into floating numbers
events_df["Start_X"] = | pd.to_numeric(events_df["Start_X"], downcast="float") | pandas.to_numeric |
import numpy as np
import pandas as pd
import datetime as dt
from dateutil import tz
from dateutil.relativedelta import relativedelta
import math
import time
from func_get import get_json, get_time, convert_tz, get_currency, get_last_price, get_position, get_quote_currency_free, get_quote_currency_value, get_order_fee
from func_cal import round_amount, cal_unrealised_future, cal_drawdown_future, cal_available_budget, cal_end_balance
from func_update import update_json, append_order, remove_order, append_cash_flow_df, update_transfer
from func_noti import noti_success_order, noti_warning, print_position
def get_date_list(start_date, end_date=None):
'''
Generate the list of dates used to fetch OHLCV data in one-day steps from fetch_ohlcv.
'''
if end_date == None:
end_date = dt.date.today()
num_day = (end_date - start_date).days
date_list = [end_date - relativedelta(days=x) for x in range(num_day, -1, -1)]
return date_list
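# Illustrative result (hypothetical dates):
#   get_date_list(dt.date(2021, 5, 1), dt.date(2021, 5, 3))
#   -> [date(2021, 5, 1), date(2021, 5, 2), date(2021, 5, 3)]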
def get_js_date(dt_date, start_hour):
'''
Convert a date plus start hour to a JavaScript-style millisecond timestamp.
The result is based on the local timezone.
'''
dt_datetime = dt.datetime(dt_date.year, dt_date.month, dt_date.day, start_hour)
js_datetime = dt_datetime.timestamp() * 1000
return js_datetime
def get_base_time(date, base_timezone):
'''
Convert to base_timezone to check day light saving.
DST change at 2 am.
'''
base_time = dt.datetime(date.year, date.month, date.day, 2, tzinfo=tz.gettz(base_timezone))
return base_time
def cal_dst(date, base_timezone):
today_dst = bool(get_base_time(date, base_timezone).dst())
tomorrow_dst = bool(get_base_time(date + relativedelta(days=1), base_timezone).dst())
return today_dst, tomorrow_dst
def cal_dst_offset(today_dst, tomorrow_dst):
'''
Offset ending of the day before DST change to be in sync with start time of the changing day.
'''
if (today_dst == 0) & (tomorrow_dst == 1):
dst_offset = -60
elif (today_dst == 1) & (tomorrow_dst == 0):
dst_offset = 60
else:
dst_offset = 0
return dst_offset
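# Illustrative truth table: cal_dst_offset(False, True) -> -60 (day before DST starts),
# cal_dst_offset(True, False) -> 60 (day before DST ends), otherwise 0.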
def get_start_hour(tomorrow_dst):
if tomorrow_dst == 1:
start_hour = 4
else:
start_hour = 5
return start_hour
def get_timeframe(interval):
timeframe_dict = {1440:'1d', 240:'4h', 60:'1h', 15:'15m', 5:'5m', 1:'1m'}
for i in timeframe_dict.keys():
if interval % i == 0:
base_interval = i
break
base_timeframe = timeframe_dict[base_interval]
step = int(interval / base_interval)
return base_timeframe, base_interval, step
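# Illustrative results (hypothetical intervals):
#   get_timeframe(120) -> ('1h', 60, 2)   # two 1h candles grouped per 120-minute bar
#   get_timeframe(240) -> ('4h', 240, 1)  # native 4h timeframe, no grouping needed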
def group_timeframe(df, step):
h_dict = {'time':[], 'open':[], 'high':[], 'low':[], 'close':[]}
for i in [x for x in range(0, len(df), step)]:
temp_df = df.iloc[i:min(i + step, len(df)), :].reset_index(drop=True)
h_dict['time'].append(temp_df['time'][0])
h_dict['open'].append(temp_df['open'][0])
h_dict['high'].append(max(temp_df['high']))
h_dict['low'].append(min(temp_df['low']))
h_dict['close'].append(temp_df['close'][len(temp_df) - 1])
df = pd.DataFrame(h_dict)
return df
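# Worked example (hypothetical candles) with step=2: rows (o=1, h=3, l=0.5, c=2) and
# (o=2, h=4, l=1.5, c=3.5) collapse into one bar with open=1, high=4, low=0.5, close=3.5.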
def get_ohlcv(exchange, config_params_path):
ohlcv_df = pd.DataFrame(columns = ['time', 'open', 'high', 'low', 'close'])
config_params = get_json(config_params_path)
base_timeframe, base_interval, step = get_timeframe(config_params['interval'])
# Get start date to cover window range.
min_num_date = np.ceil((config_params['interval'] * config_params['window']) / 1440)
start_date = dt.date.today() - relativedelta(days=min_num_date)
date_list = get_date_list(start_date)
for date in date_list:
today_dst, tomorrow_dst = cal_dst(date, config_params['base_timezone'])
dst_offset = cal_dst_offset(today_dst, tomorrow_dst)
start_hour = get_start_hour(today_dst)
limit = int((1440 / base_interval) + (dst_offset / base_interval))
since = get_js_date(date, start_hour)
ohlcv = exchange.fetch_ohlcv(config_params['symbol'], base_timeframe, since, limit)
if len(ohlcv) > 0:
temp_df = pd.DataFrame(ohlcv)
temp_df.columns = ['time', 'open', 'high', 'low', 'close', 'volume']
temp_df['time'] = pd.to_datetime(temp_df['time'], unit='ms')
temp_df['time'] = temp_df['time'].apply(lambda x: convert_tz(x))
# Drop the timezone info after applying the timezone offset.
temp_df['time'] = temp_df['time'].dt.tz_localize(None)
if step > 1:
date_df = group_timeframe(temp_df, step)
else:
date_df = temp_df[['time', 'open', 'high', 'low', 'close']]
ohlcv_df = pd.concat([ohlcv_df, date_df])
ohlcv_df = ohlcv_df.reset_index(drop=True)
signal_timestamp = ohlcv_df['time'][len(ohlcv_df) - 1]
ohlcv_df = ohlcv_df.iloc[:len(ohlcv_df) - 1, :]
return ohlcv_df, signal_timestamp
def update_price(close_price, signal_price, last_loop_path):
last_loop = get_json(last_loop_path)
last_loop['close_price'] = float(close_price)
last_loop['signal_price'] = signal_price
update_json(last_loop, last_loop_path)
def update_side(side, last_loop_path):
last_loop = get_json(last_loop_path)
last_loop['side'] = side
update_json(last_loop, last_loop_path)
def update_signal_timestamp(signal_timestamp, last_loop_path):
last_loop = get_json(last_loop_path)
last_loop['signal_timestamp'] = str(signal_timestamp)
update_json(last_loop, last_loop_path)
def update_open_position(order, exchange, config_params, position_path):
position = get_json(position_path)
fee = get_order_fee(order, exchange, config_params['symbol'])
position['side'] = order['side']
position['entry_price'] = order['price']
position['amount'] = order['amount']
position['open_fee'] = fee
update_json(position, position_path)
def update_reduce_position(order, position_path):
position = get_json(position_path)
amount = position['amount']
amount -= order['amount']
position['amount'] = amount
update_json(position, position_path)
def update_max_drawdown(drawdown, last_loop_path):
last_loop = get_json(last_loop_path)
last_loop['max_drawdown'] = drawdown
update_json(last_loop, last_loop_path)
def check_new_timestamp(signal_timestamp, config_params_path, last_loop_path):
config_params = get_json(config_params_path)
last_loop = get_json(last_loop_path)
if last_loop['signal_timestamp'] == 0:
# One first loop, bypass to manage_position to update last_loop.
new_timestamp_flag = True
else:
last_signal_timestamp = pd.to_datetime(last_loop['signal_timestamp'])
expected_timestamp = last_signal_timestamp + relativedelta(minutes=config_params['interval'])
if signal_timestamp >= expected_timestamp:
new_timestamp_flag = True
else:
new_timestamp_flag = False
return new_timestamp_flag
def action_cross_signal(ohlcv_df):
close_price = ohlcv_df.loc[len(ohlcv_df) - 1, 'close']
signal_price = ohlcv_df.loc[len(ohlcv_df) - 1, 'signal']
if close_price < signal_price:
action = 'sell'
elif close_price > signal_price:
action = 'buy'
else:
action = 'hold'
return action, signal_price
def action_bound_signal(ohlcv_df):
close_price = ohlcv_df.loc[len(ohlcv_df) - 1, 'close']
last_min_signal = ohlcv_df.loc[len(ohlcv_df) - 1, 'min_signal']
last_max_signal = ohlcv_df.loc[len(ohlcv_df) - 1, 'max_signal']
signal_price = {'min':last_min_signal, 'max':last_max_signal}
if close_price < last_min_signal:
action = 'buy'
elif close_price > last_max_signal:
action = 'sell'
else:
action = 'hold'
return action, signal_price
def signal_ma(ohlcv_df, config_params):
ohlcv_df['signal'] = ohlcv_df['close'].rolling(window=int(np.round(config_params['window']))).mean()
action, signal_price = action_cross_signal(ohlcv_df)
return action, signal_price
def signal_tma(ohlcv_df, config_params):
sub_interval = (config_params['window'] + 1) / 2
# Trunc ma to get minimum avg steps.
ohlcv_df['ma'] = ohlcv_df['close'].rolling(window=math.trunc(sub_interval)).mean()
# Round tma to reach window steps.
ohlcv_df['signal'] = ohlcv_df['ma'].rolling(window=int(np.round(sub_interval))).mean()
action, signal_price = action_cross_signal(ohlcv_df)
return action, signal_price
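# Illustrative sketch (hypothetical window): with window=7, sub_interval is 4.0, so the close is
# smoothed by a 4-period SMA and that SMA is averaged again over 4 periods (triangular MA).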
def signal_bollinger(ohlcv_df, config_params):
ohlcv_df['ma'] = ohlcv_df['close'].rolling(window=int(np.round(config_params['window']))).mean()
ohlcv_df['std'] = ohlcv_df['close'].rolling(window=int(np.round(config_params['window']))).std()
ohlcv_df['min_signal'] = ohlcv_df['ma'] - (2 * ohlcv_df['std'])
ohlcv_df['max_signal'] = ohlcv_df['ma'] + (2 * ohlcv_df['std'])
action, signal_price = action_bound_signal(ohlcv_df)
return action, signal_price
def get_action(ohlcv_df, config_params):
func_dict = {
'ma':signal_ma,
'tma': signal_tma,
'bollinger': signal_bollinger
}
action, signal_price = func_dict[config_params['signal']](ohlcv_df, config_params)
return action, signal_price
def cal_new_amount(value, exchange, config_params):
leverage_value = value * config_params['leverage']
last_price = get_last_price(exchange, config_params['symbol'])
amount = leverage_value / last_price
amount = round_amount(amount, exchange, config_params['symbol'], type='down')
return amount
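# Worked example (hypothetical numbers): value=100, leverage=2, last_price=50 gives a raw
# amount of 4.0, which round_amount then rounds down to the exchange's precision.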
def cal_reduce_amount(value, exchange, config_params):
leverage_value = value * config_params['leverage']
last_price = get_last_price(exchange, config_params['symbol'])
amount = leverage_value / last_price
amount = round_amount(amount, exchange, config_params['symbol'], type='up')
return amount
def append_profit_technical(order, exchange, config_params, position_path, profit_df_path):
profit_df = pd.read_csv(profit_df_path)
position = get_json(position_path)
timestamp = get_time()
if position['side'] == 'buy':
margin = order['price'] - position['entry_price']
elif position['side'] == 'sell':
margin = position['entry_price'] - order['price']
open_fee = position['open_fee']
close_fee = get_order_fee(order, exchange, config_params['symbol'])
total_fee = open_fee + close_fee
profit = (margin * order['amount']) - total_fee
profit_df.loc[len(profit_df)] = [timestamp, order['id'], order['symbol'], order['side'], order['amount'], position['entry_price'], order['price'], total_fee, profit]
profit_df.to_csv(profit_df_path, index=False)
def open_position(available_budget, action, exchange, config_params_path, open_orders_df_path):
config_params = get_json(config_params_path)
amount = cal_new_amount(available_budget, exchange, config_params)
order = exchange.create_order(config_params['symbol'], 'market', action, amount)
append_order(order, 'amount', open_orders_df_path)
def close_position(action, position, exchange, config_params_path, open_orders_df_path):
config_params = get_json(config_params_path)
order = exchange.create_order(config_params['symbol'], 'market', action, position['amount'], params={'reduceOnly': True})
append_order(order, 'amount', open_orders_df_path)
def reduce_position(value, action, exchange, config_params_path, open_orders_df_path):
config_params = get_json(config_params_path)
amount = cal_reduce_amount(value, exchange, config_params)
order = exchange.create_order(config_params['symbol'], 'market', action, amount)
append_order(order, 'amount', open_orders_df_path)
def clear_orders_technical(exchange, bot_name, config_system, config_params, open_orders_df_path, transactions_df_path):
open_orders_df = pd.read_csv(open_orders_df_path)
if len(open_orders_df) > 0:
order_id = open_orders_df['order_id'][0]
order = exchange.fetch_order(order_id, config_params['symbol'])
while order['status'] != 'closed':
order = exchange.fetch_order(order_id, config_params['symbol'])
time.sleep(config_system['idle_stage'])
remove_order(order_id, open_orders_df_path)
append_order(order, 'filled', transactions_df_path)
noti_success_order(order, bot_name, config_params['symbol'])
return order
def withdraw_position(net_transfer, exchange, bot_name, config_system, config_params_path, position_path, open_orders_df_path, transactions_df_path, profit_df_path):
config_params = get_json(config_params_path)
position = get_json(position_path)
reverse_action = {'buy':'sell', 'sell':'buy'}
action = reverse_action[position['side']]
reduce_position(-net_transfer, action, exchange, config_params_path, open_orders_df_path)
time.sleep(config_system['idle_stage'])
reduce_order = clear_orders_technical(exchange, bot_name, config_system, config_params, open_orders_df_path, transactions_df_path)
append_profit_technical(reduce_order, exchange, config_params, position_path, profit_df_path)
update_reduce_position(reduce_order, position_path)
def manage_position(ohlcv_df, exchange, bot_name, config_system, config_params_path, last_loop_path, position_path, transfer_path, open_orders_df_path, transactions_df_path, profit_df_path, cash_flow_df_path):
config_params = get_json(config_params_path)
_, quote_currency = get_currency(config_params['symbol'])
last_loop = get_json(last_loop_path)
action, signal_price = get_action(ohlcv_df, config_params)
close_price = ohlcv_df.loc[len(ohlcv_df) - 1, 'close']
print(f"Close price: {close_price} {quote_currency}")
print(f"Signal price: {signal_price} {quote_currency}")
if action == 'hold':
action = last_loop['side']
if last_loop['side'] not in [action, 'start']:
position = get_json(position_path)
if position['amount'] > 0:
close_position(action, position, exchange, config_params_path, open_orders_df_path)
time.sleep(config_system['idle_stage'])
close_order = clear_orders_technical(exchange, bot_name, config_system, config_params, open_orders_df_path, transactions_df_path)
append_profit_technical(close_order, exchange, config_params, position_path, profit_df_path)
update_reduce_position(close_order, position_path)
transfer = get_json(transfer_path)
quote_currency_free = get_quote_currency_free(exchange, quote_currency)
# The technical bot does not collect cash flow.
available_budget = cal_available_budget(quote_currency_free, 0, transfer)
open_position(available_budget, action, exchange, config_params_path, open_orders_df_path)
time.sleep(config_system['idle_stage'])
open_order = clear_orders_technical(exchange, bot_name, config_system, config_params, open_orders_df_path, transactions_df_path)
update_open_position(open_order, exchange, config_params, position_path)
else:
print("No action")
update_price(close_price, signal_price, last_loop_path)
update_side(action, last_loop_path)
def update_end_date_technical(prev_date, exchange, bot_name, config_system, config_params_path, position_path, transfer_path, open_orders_df_path, transactions_df_path, profit_df_path, cash_flow_df_path):
config_params = get_json(config_params_path)
cash_flow_df_path = cash_flow_df_path.format(bot_name)
cash_flow_df = pd.read_csv(cash_flow_df_path)
last_price = get_last_price(exchange, config_params['symbol'])
position = get_json(position_path)
_, quote_currency = get_currency(config_params['symbol'])
unrealised = cal_unrealised_future(last_price, position)
profit_df = pd.read_csv(profit_df_path)
import argparse
import os
import pandas as pd
from sklearn.utils import shuffle
def split_species(specie_set, test_count, valid_count, seed=42):
# Create empty dataframes
train_set = pd.DataFrame(columns=specie_set.columns)
test_set = pd.DataFrame(columns=specie_set.columns)
from abc import ABC
from spikeextractors import SortingExtractor
from pathlib import Path
import numpy as np
try:
import pandas as pd
HAVE_PANDAS = True
except ImportError:
HAVE_PANDAS = False
class ALFSortingExtractor(SortingExtractor):
extractor_name = 'ALFSortingExtractor'
installed = HAVE_PANDAS # check at class level if installed or not
is_writable = True
mode = 'folder'
installation_mesg = "To use the ALFSortingExtractor run:\n\n pip install pandas\n\n"
def __init__(self, folder_path, sampling_frequency=30000):
assert HAVE_PANDAS, self.installation_mesg
SortingExtractor.__init__(self)
# check correct parent folder:
self.file_loc = Path(folder_path)
if 'probe' not in Path(self.file_loc).name:
raise ValueError('folder name should contain "probe", containing channels, clusters.* .npy datasets')
# load datasets as mmap into a dict:
self._required_alf_datasets = ['spikes.times', 'spikes.clusters']
self._found_alf_datasets = dict()
for alf_dataset_name in self.file_loc.iterdir():
if 'spikes' in alf_dataset_name.stem or 'clusters' in alf_dataset_name.stem:
if 'npy' in alf_dataset_name.suffix:
self._found_alf_datasets.update({alf_dataset_name.stem: self._load_npy(alf_dataset_name)})
elif 'metrics' in alf_dataset_name.stem:
self._found_alf_datasets.update({alf_dataset_name.stem: pd.read_csv(alf_dataset_name)})
"""
Tests that the file header is properly handled or inferred
during parsing for all of the parsers defined in parsers.py
"""
from collections import namedtuple
from io import StringIO
import numpy as np
import pytest
from pandas.errors import ParserError
from pandas import (
DataFrame,
Index,
MultiIndex,
)
import pandas._testing as tm
# TODO(1.4): Change me to xfails at release time
skip_pyarrow = pytest.mark.usefixtures("pyarrow_skip")
@skip_pyarrow
def test_read_with_bad_header(all_parsers):
parser = all_parsers
msg = r"but only \d+ lines in file"
with pytest.raises(ValueError, match=msg):
s = StringIO(",,")
parser.read_csv(s, header=[10])
def test_negative_header(all_parsers):
# see gh-27779
parser = all_parsers
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
with pytest.raises(
ValueError,
match="Passing negative integer to header is invalid. "
"For no header, use header=None instead",
):
parser.read_csv(StringIO(data), header=-1)
@pytest.mark.parametrize("header", [([-1, 2, 4]), ([-5, 0])])
def test_negative_multi_index_header(all_parsers, header):
# see gh-27779
parser = all_parsers
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
with pytest.raises(
ValueError, match="cannot specify multi-index header with negative integers"
):
parser.read_csv(StringIO(data), header=header)
@pytest.mark.parametrize("header", [True, False])
def test_bool_header_arg(all_parsers, header):
# see gh-6114
parser = all_parsers
data = """\
MyColumn
a
b
a
b"""
msg = "Passing a bool to header is invalid"
with pytest.raises(TypeError, match=msg):
parser.read_csv(StringIO(data), header=header)
def test_no_header_prefix(all_parsers):
parser = all_parsers
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
result = parser.read_csv(StringIO(data), prefix="Field", header=None)
expected = DataFrame(
[[1, 2, 3, 4, 5], [6, 7, 8, 9, 10], [11, 12, 13, 14, 15]],
columns=["Field0", "Field1", "Field2", "Field3", "Field4"],
)
tm.assert_frame_equal(result, expected)
@skip_pyarrow
def test_header_with_index_col(all_parsers):
parser = all_parsers
data = """foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
names = ["A", "B", "C"]
result = parser.read_csv(StringIO(data), names=names)
expected = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
index=["foo", "bar", "baz"],
columns=["A", "B", "C"],
)
tm.assert_frame_equal(result, expected)
def test_header_not_first_line(all_parsers):
parser = all_parsers
data = """got,to,ignore,this,line
got,to,ignore,this,line
index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
data2 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
result = parser.read_csv(StringIO(data), header=2, index_col=0)
expected = parser.read_csv(StringIO(data2), header=0, index_col=0)
tm.assert_frame_equal(result, expected)
@skip_pyarrow
def test_header_multi_index(all_parsers):
parser = all_parsers
expected = tm.makeCustomDataframe(5, 3, r_idx_nlevels=2, c_idx_nlevels=4)
data = """\
C0,,C_l0_g0,C_l0_g1,C_l0_g2
C1,,C_l1_g0,C_l1_g1,C_l1_g2
C2,,C_l2_g0,C_l2_g1,C_l2_g2
C3,,C_l3_g0,C_l3_g1,C_l3_g2
R0,R1,,,
R_l0_g0,R_l1_g0,R0C0,R0C1,R0C2
R_l0_g1,R_l1_g1,R1C0,R1C1,R1C2
R_l0_g2,R_l1_g2,R2C0,R2C1,R2C2
R_l0_g3,R_l1_g3,R3C0,R3C1,R3C2
R_l0_g4,R_l1_g4,R4C0,R4C1,R4C2
"""
result = parser.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[0, 1])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"kwargs,msg",
[
(
{"index_col": ["foo", "bar"]},
(
"index_col must only contain "
"row numbers when specifying "
"a multi-index header"
),
),
(
{"index_col": [0, 1], "names": ["foo", "bar"]},
("cannot specify names when specifying a multi-index header"),
),
(
{"index_col": [0, 1], "usecols": ["foo", "bar"]},
("cannot specify usecols when specifying a multi-index header"),
),
],
)
def test_header_multi_index_invalid(all_parsers, kwargs, msg):
data = """\
C0,,C_l0_g0,C_l0_g1,C_l0_g2
C1,,C_l1_g0,C_l1_g1,C_l1_g2
C2,,C_l2_g0,C_l2_g1,C_l2_g2
C3,,C_l3_g0,C_l3_g1,C_l3_g2
R0,R1,,,
R_l0_g0,R_l1_g0,R0C0,R0C1,R0C2
R_l0_g1,R_l1_g1,R1C0,R1C1,R1C2
R_l0_g2,R_l1_g2,R2C0,R2C1,R2C2
R_l0_g3,R_l1_g3,R3C0,R3C1,R3C2
R_l0_g4,R_l1_g4,R4C0,R4C1,R4C2
"""
parser = all_parsers
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), header=[0, 1, 2, 3], **kwargs)
_TestTuple = namedtuple("_TestTuple", ["first", "second"])
@skip_pyarrow
@pytest.mark.parametrize(
"kwargs",
[
{"header": [0, 1]},
{
"skiprows": 3,
"names": [
("a", "q"),
("a", "r"),
("a", "s"),
("b", "t"),
("c", "u"),
("c", "v"),
],
},
{
"skiprows": 3,
"names": [
_TestTuple("a", "q"),
_TestTuple("a", "r"),
_TestTuple("a", "s"),
_TestTuple("b", "t"),
_TestTuple("c", "u"),
_TestTuple("c", "v"),
],
},
],
)
def test_header_multi_index_common_format1(all_parsers, kwargs):
parser = all_parsers
expected = DataFrame(
[[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]],
index=["one", "two"],
columns=MultiIndex.from_tuples(
[("a", "q"), ("a", "r"), ("a", "s"), ("b", "t"), ("c", "u"), ("c", "v")]
),
)
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
,,,,,,
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = parser.read_csv(StringIO(data), index_col=0, **kwargs)
tm.assert_frame_equal(result, expected)
@skip_pyarrow
@pytest.mark.parametrize(
"kwargs",
[
{"header": [0, 1]},
{
"skiprows": 2,
"names": [
("a", "q"),
("a", "r"),
("a", "s"),
("b", "t"),
("c", "u"),
("c", "v"),
],
},
{
"skiprows": 2,
"names": [
_TestTuple("a", "q"),
_TestTuple("a", "r"),
_TestTuple("a", "s"),
_TestTuple("b", "t"),
_TestTuple("c", "u"),
_TestTuple("c", "v"),
],
},
],
)
def test_header_multi_index_common_format2(all_parsers, kwargs):
parser = all_parsers
expected = DataFrame(
[[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]],
index=["one", "two"],
columns=MultiIndex.from_tuples(
[("a", "q"), ("a", "r"), ("a", "s"), ("b", "t"), ("c", "u"), ("c", "v")]
),
)
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = parser.read_csv(StringIO(data), index_col=0, **kwargs)
tm.assert_frame_equal(result, expected)
@skip_pyarrow
@pytest.mark.parametrize(
"kwargs",
[
{"header": [0, 1]},
{
"skiprows": 2,
"names": [
("a", "q"),
("a", "r"),
("a", "s"),
("b", "t"),
("c", "u"),
("c", "v"),
],
},
{
"skiprows": 2,
"names": [
_TestTuple("a", "q"),
_TestTuple("a", "r"),
_TestTuple("a", "s"),
_TestTuple("b", "t"),
_TestTuple("c", "u"),
_TestTuple("c", "v"),
],
},
],
)
def test_header_multi_index_common_format3(all_parsers, kwargs):
parser = all_parsers
expected = DataFrame(
[[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]],
index=["one", "two"],
columns=MultiIndex.from_tuples(
[("a", "q"), ("a", "r"), ("a", "s"), ("b", "t"), ("c", "u"), ("c", "v")]
),
)
expected = expected.reset_index(drop=True)
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = parser.read_csv(StringIO(data), index_col=None, **kwargs)
tm.assert_frame_equal(result, expected)
@skip_pyarrow
def test_header_multi_index_common_format_malformed1(all_parsers):
parser = all_parsers
expected = DataFrame(
np.array([[2, 3, 4, 5, 6], [8, 9, 10, 11, 12]], dtype="int64"),
index=Index([1, 7]),
columns=MultiIndex(
levels=[["a", "b", "c"], ["r", "s", "t", "u", "v"]],
codes=[[0, 0, 1, 2, 2], [0, 1, 2, 3, 4]],
names=["a", "q"],
),
)
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = parser.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
@skip_pyarrow
def test_header_multi_index_common_format_malformed2(all_parsers):
parser = all_parsers
expected = DataFrame(
np.array([[2, 3, 4, 5, 6], [8, 9, 10, 11, 12]], dtype="int64"),
index=Index([1, 7]),
columns=MultiIndex(
levels=[["a", "b", "c"], ["r", "s", "t", "u", "v"]],
codes=[[0, 0, 1, 2, 2], [0, 1, 2, 3, 4]],
names=[None, "q"],
),
)
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = parser.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
@skip_pyarrow
def test_header_multi_index_common_format_malformed3(all_parsers):
parser = all_parsers
expected = DataFrame(
np.array([[3, 4, 5, 6], [9, 10, 11, 12]], dtype="int64"),
index=MultiIndex(levels=[[1, 7], [2, 8]], codes=[[0, 1], [0, 1]]),
columns=MultiIndex(
levels=[["a", "b", "c"], ["s", "t", "u", "v"]],
codes=[[0, 1, 2, 2], [0, 1, 2, 3]],
names=[None, "q"],
),
)
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = parser.read_csv(StringIO(data), header=[0, 1], index_col=[0, 1])
tm.assert_frame_equal(expected, result)
@skip_pyarrow
def test_header_multi_index_blank_line(all_parsers):
# GH 40442
parser = all_parsers
data = [[None, None], [1, 2], [3, 4]]
columns = MultiIndex.from_tuples([("a", "A"), ("b", "B")])
expected = DataFrame(data, columns=columns)
data = "a,b\nA,B\n,\n1,2\n3,4"
result = parser.read_csv(StringIO(data), header=[0, 1])
tm.assert_frame_equal(expected, result)
@skip_pyarrow
@pytest.mark.parametrize(
"data,header", [("1,2,3\n4,5,6", None), ("foo,bar,baz\n1,2,3\n4,5,6", 0)]
)
def test_header_names_backward_compat(all_parsers, data, header):
# see gh-2539
parser = all_parsers
expected = parser.read_csv(StringIO("1,2,3\n4,5,6"), names=["a", "b", "c"])
result = parser.read_csv(StringIO(data), names=["a", "b", "c"], header=header)
tm.assert_frame_equal(result, expected)
@skip_pyarrow
@pytest.mark.parametrize("kwargs", [{}, {"index_col": False}])
def test_read_only_header_no_rows(all_parsers, kwargs):
# See gh-7773
parser = all_parsers
expected = DataFrame(columns=["a", "b", "c"])
result = parser.read_csv(StringIO("a,b,c"), **kwargs)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"kwargs,names",
[
({}, [0, 1, 2, 3, 4]),
({"prefix": "X"}, ["X0", "X1", "X2", "X3", "X4"]),
(
{"names": ["foo", "bar", "baz", "quux", "panda"]},
["foo", "bar", "baz", "quux", "panda"],
),
],
)
def test_no_header(all_parsers, kwargs, names):
parser = all_parsers
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = DataFrame(
[[1, 2, 3, 4, 5], [6, 7, 8, 9, 10], [11, 12, 13, 14, 15]], columns=names
)
result = parser.read_csv(StringIO(data), header=None, **kwargs)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("header", [["a", "b"], "string_header"])
def test_non_int_header(all_parsers, header):
# see gh-16338
msg = "header must be integer or list of integers"
data = """1,2\n3,4"""
parser = all_parsers
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), header=header)
@skip_pyarrow
def test_singleton_header(all_parsers):
# see gh-7757
data = """a,b,c\n0,1,2\n1,2,3"""
parser = all_parsers
expected = DataFrame({"a": [0, 1], "b": [1, 2], "c": [2, 3]})
result = parser.read_csv(StringIO(data), header=[0])
tm.assert_frame_equal(result, expected)
@skip_pyarrow
@pytest.mark.parametrize(
"data,expected",
[
(
"A,A,A,B\none,one,one,two\n0,40,34,0.1",
DataFrame(
[[0, 40, 34, 0.1]],
columns=MultiIndex.from_tuples(
[("A", "one"), ("A", "one.1"), ("A", "one.2"), ("B", "two")]
),
),
),
(
"A,A,A,B\none,one,one.1,two\n0,40,34,0.1",
DataFrame(
[[0, 40, 34, 0.1]],
columns=MultiIndex.from_tuples(
[("A", "one"), ("A", "one.1"), ("A", "one.1.1"), ("B", "two")]
),
),
),
(
"A,A,A,B,B\none,one,one.1,two,two\n0,40,34,0.1,0.1",
DataFrame(
[[0, 40, 34, 0.1, 0.1]],
columns=MultiIndex.from_tuples(
[
("A", "one"),
("A", "one.1"),
("A", "one.1.1"),
("B", "two"),
("B", "two.1"),
]
),
),
),
],
)
def test_mangles_multi_index(all_parsers, data, expected):
# see gh-18062
parser = all_parsers
result = parser.read_csv(StringIO(data), header=[0, 1])
tm.assert_frame_equal(result, expected)
@skip_pyarrow
@pytest.mark.parametrize("index_col", [None, [0]])
@pytest.mark.parametrize(
"columns", [None, (["", "Unnamed"]), (["Unnamed", ""]), (["Unnamed", "NotUnnamed"])]
)
def test_multi_index_unnamed(all_parsers, index_col, columns):
# see gh-23687
#
# When specifying a multi-index header, make sure that
# we don't error just because one of the rows in our header
# has ALL column names containing the string "Unnamed". The
# correct condition to check is whether the row contains
# ALL columns that did not have names (and instead were given
# placeholder ones).
parser = all_parsers
header = [0, 1]
if index_col is None:
data = ",".join(columns or ["", ""]) + "\n0,1\n2,3\n4,5\n"
else:
data = ",".join([""] + (columns or ["", ""])) + "\n,0,1\n0,2,3\n1,4,5\n"
if columns is None:
msg = (
r"Passed header=\[0,1\] are too "
r"many rows for this multi_index of columns"
)
with pytest.raises(ParserError, match=msg):
parser.read_csv(StringIO(data), header=header, index_col=index_col)
else:
result = parser.read_csv(StringIO(data), header=header, index_col=index_col)
exp_columns = []
for i, col in enumerate(columns):
if not col: # Unnamed.
col = f"Unnamed: {i if index_col is None else i + 1}_level_0"
exp_columns.append(col)
columns = MultiIndex.from_tuples(zip(exp_columns, ["0", "1"]))
expected = DataFrame([[2, 3], [4, 5]], columns=columns)
tm.assert_frame_equal(result, expected)


@skip_pyarrow
def test_read_csv_multiindex_columns(all_parsers):
    # GH#6051
    parser = all_parsers

    s1 = "Male, Male, Male, Female, Female\nR, R, L, R, R\n.86, .67, .88, .78, .81"
    s2 = (
        "Male, Male, Male, Female, Female\n"
        "R, R, L, R, R\n"
        ".86, .67, .88, .78, .81\n"
        ".86, .67, .88, .78, .82"
    )

    mi = MultiIndex.from_tuples(
        [
            ("Male", "R"),
            (" Male", " R"),
            (" Male", " L"),
            (" Female", " R"),
            (" Female", " R.1"),
        ]
    )
    expected = DataFrame(
        [[0.86, 0.67, 0.88, 0.78, 0.81], [0.86, 0.67, 0.88, 0.78, 0.82]], columns=mi
    )

    df1 = parser.read_csv(StringIO(s1), header=[0, 1])
    tm.assert_frame_equal(df1, expected.iloc[:1])
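    # Assumed continuation (not shown above): presumably s2, which adds a second
    # data row to the same headers, is parsed and checked the same way as s1.
    df2 = parser.read_csv(StringIO(s2), header=[0, 1])
    tm.assert_frame_equal(df2, expected)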


# Outside Imports
import dash
import glob
import plotly
import json
import numpy as np
import pandas as pd
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import plotly.express as px

token = '<KEY>'
px.set_mapbox_access_token(token)

# Imports from this application
from app import app

# Route metadata shared by the classes below.
url = 'routes/routes.csv'
routes_df = pd.read_csv(url, encoding='latin-1')
routes_df = routes_df[['StateNum', 'Route', 'RouteName', 'Active', 'Latitude', 'Longitude', 'Stratum', 'BCR', 'RouteTypeID']]
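

# Hedged aside (not in the original source): the "read everything, then subset"
# pattern above can also be written with read_csv's `usecols` argument, which
# skips the unused columns during parsing. Sketch only; it is not called here,
# and the function name is illustrative.
def _load_routes_subset(path='routes/routes.csv'):
    return pd.read_csv(
        path,
        encoding='latin-1',
        usecols=['StateNum', 'Route', 'RouteName', 'Active', 'Latitude',
                 'Longitude', 'Stratum', 'BCR', 'RouteTypeID'],
    )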


class Bird:
    # Route metadata (duplicates the module-level frame above).
    url = 'routes/routes.csv'
    routes_df = pd.read_csv(url, encoding='latin-1')
    routes_df = routes_df[['StateNum', 'Route', 'RouteName', 'Active', 'Latitude', 'Longitude', 'Stratum', 'BCR', 'RouteTypeID']]

    def __init__(self, bird_num, low_year, high_year):
        self.bird_num = bird_num
        self.low_year = low_year
        self.high_year = high_year

    def get_state_data(self):
        # Build one combined frame of per-route totals across every state file.
        path = r"states"
        all_files = glob.glob(path + "/*.csv")
        li = []
        for filename in all_files:
            df = pd.read_csv(filename, index_col=None, header=0)
            state_num = df.at[0, "StateNum"]
            Adf = self.bird_function(df)          # keep only this species (AOU code)
            Bdf = self.year_function(Adf)         # keep only the selected years
            Cdf = self.route_count_function(Bdf)  # total count per route
            Ddf = self.route_clean_function(state_num)  # route metadata for this state
            Edf = self.create_array(Cdf)          # routes that actually have counts
            Fdf = self.finish_df(Ddf, Cdf, Edf)   # metadata joined with counts
            li.append(Fdf)
        frame = pd.concat(li, axis=0, ignore_index=True)
        return frame

    def bird_function(self, state_df):
        # Rows for the selected species (AOU code) only.
        beginning_df = state_df[(state_df["AOU"] == self.bird_num)]
        return beginning_df

    def year_function(self, state_df):
        # Rows inside the selected year range.
        df = state_df[(state_df["Year"] >= self.low_year) & (state_df["Year"] <= self.high_year)]
        df = df.reset_index(drop=True)
        return df

    def route_count_function(self, state_df):
        # Total count per route for this species.
        df = state_df.groupby("Route").SpeciesTotal.sum().reset_index()
        df['AOU'] = str(self.bird_num)
        return df

    def route_clean_function(self, state_number):
        # Route metadata for one state, sorted by route number.
        df = routes_df[(routes_df["StateNum"] == state_number)]
        df = df.sort_values(by="Route", ascending=True)
        df = df.reset_index(drop=True)
        return df

    def create_array(self, st_routes):
        # Route numbers that appear in the counts frame.
        route_array = []
        for ind in st_routes.index:
            route_array.append(st_routes["Route"][ind])
        return route_array

    def finish_df(self, st_routes, st_counts, array):
        # Flag the metadata rows whose route has a count, then join the two frames.
        booleans = []
        x = 0
        for ind in st_routes.index:
            if x == len(array):
                booleans.append(False)
            elif (st_routes['Route'][ind] == array[x]):
                booleans.append(True)
                x += 1
            else:
                booleans.append(False)
        is_route = pd.Series(booleans)
        df = st_routes[is_route]
        final_df = pd.merge(df, st_counts, on=["Route"], how="inner")
        return final_df

    # Used by the Route and Year classes; sorts a frame by year for graphing.
    def sort_by_year(self, df):
        new_df = df.sort_values(by="Year", ascending=True)
        final_df = new_df.reset_index(drop=True)
        return final_df

    @classmethod
    def map_graph(cls, df):
        # Scatter map of routes, sized and coloured by the species total.
        df["text"] = ("Count: " + df["SpeciesTotal"].astype(str))
        fig = px.scatter_mapbox(data_frame=df, lat="Latitude", lon="Longitude",
                                mapbox_style="light", color="SpeciesTotal", size="SpeciesTotal",
                                text="RouteName", hover_name="text",
                                color_continuous_scale=px.colors.sequential.Rainbow, opacity=0.3,
                                size_max=25, zoom=4.5, center={"lat": 39.25, "lon": -84.8})
        fig.update_layout(clickmode='event+select')
        return fig
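

# Hedged usage sketch (not in the original source): how the Bird class above
# might be driven from a Dash callback. The AOU code and year range below are
# placeholders rather than values taken from the app, and the function name is
# illustrative; it is not called anywhere in this module.
def _example_bird_map(bird_num=6010, low_year=2010, high_year=2019):
    bird = Bird(bird_num, low_year, high_year)
    state_counts = bird.get_state_data()   # per-route totals for that species
    return Bird.map_graph(state_counts)    # plotly scatter_mapbox figure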


class Route(Bird):
    def __init__(self, bird_num, low_year, high_year, route):
        super().__init__(bird_num, low_year, high_year)
        self.route = route

    def get_route_data(self):
        path = r"states"
        all_files = glob.glob(path + "/*.csv")
        li = []
        for filename in all_files:
            df = pd.read_csv(filename, index_col=None, header=0)
            df = df[['StateNum', 'Route', 'RPID', 'Year', 'AOU', 'SpeciesTotal']]
            state_num = df.at[0, "StateNum"]
            cleaned_rts = self.route_clean_function(state_num)
            combined_strts = pd.merge(df, cleaned_rts)
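            # Assumed completion, mirroring get_state_data(): collect each
            # state's merged frame and concatenate them into one result.
            li.append(combined_strts)
        frame = pd.concat(li, axis=0, ignore_index=True)
        return frame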