import pandas as pd
from sklearn.model_selection import StratifiedKFold
import numpy as np
from tqdm import tqdm
from simulation import load_sample
from nn_model import load_model, performance, repeated_experiment
from keras.callbacks import EarlyStopping
from keras import backend as K
from sklearn.metrics import accuracy_score
randint = np.random.randint
def fold_generator(data, k):
x, y = data
skf = StratifiedKFold(n_splits=k, shuffle=True)
for train_idx, val_idx in skf.split(x, y.argmax(axis=1)):
x_train, x_val = x[train_idx], x[val_idx]
y_train, y_val = y[train_idx], y[val_idx]
yield (x_train, y_train), (x_val, y_val)
def hp_generator(lr_range, wd_range, do_range):
lr_min, lr_max = lr_range
random_lr = randint(1, 10) * 10 ** randint(lr_min, lr_max)
wd_min, wd_max = wd_range
random_wd = randint(1, 10) * 10 ** randint(wd_min, wd_max)
do_min, do_max = do_range
random_do = np.random.uniform(do_min, do_max)
return random_lr, random_wd, random_do
def sample_hp(d):
return hp_generator(d["lr_range"], d["wd_range"], d["do_range"])
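# --- Illustrative sketch (not part of the original experiment code) ----------
# A small demo of what sample_hp/hp_generator produce: the mantissa comes from
# randint(1, 10) and the exponent from randint(lo, hi) (upper bound exclusive),
# so with lr_range=(-4, -1) the learning rate lands in [1e-4, 9e-2] and with
# wd_range=(-5, -3) the weight decay lands in [1e-5, 9e-4].
def _demo_sample_hp(n=3):
    example_hp = {'lr_range': (-4, -1), 'wd_range': (-5, -3), 'do_range': (0.3, 0.7)}
    return [sample_hp(example_hp) for _ in range(n)]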
def ncv_single(std, repeat):
list_std = list(range(1, 21))
k = 5
n_train = 224
n_val = 56
n_loads = 5000
p = 256
repeats_hp = 20
hp = {'lr_range': (-4, -1),
'wd_range': (-5, -3),
'do_range': (0.3, 0.7)}
# name = f"bncv_sco.csv"
out_names = ["ncv", "bncv", "bncv_top_3", "bncv_top_5"]
final_results = pd.DataFrame(columns=["name", "score", "std"])
line_counter = 0
data_loads = load_sample(n_loads, p, std)
data_cv = load_sample(n_train+n_val, p, std)
outputs = cv(data_cv, hp, k, repeats_hp, data_loads)
for name, score in zip(out_names, outputs):
final_results.loc[line_counter, "name"] = name
final_results.loc[line_counter, "score"] = score
final_results.loc[line_counter, "std"] = std
line_counter += 1
final_results.to_csv(f"bncv_vs_ncv_{std}_{repeat}.csv", index=False)
def ncv():
k = 5
n_train = 224
n_val = 56
n_loads = 5000
p = 256
list_std = list(range(1, 21))
repeats_hp = 20
repeats_simulated_data = 20
hp = {'lr_range': (-4, -1),
'wd_range': (-5, -3),
'do_range': (0.3, 0.7)}
# name = f"bncv_sco.csv"
out_names = ["ncv", "bncv", "bncv_top_3", "bncv_top_5"]
final_results = pd.DataFrame(columns=["name", "score", "std"])
line_counter = 0
for std in tqdm(list_std):
data_loads = load_sample(n_loads, p, std)
for _ in tqdm(range(repeats_simulated_data)):
data_cv = load_sample(n_train+n_val, p, std)
outputs = cv(data_cv, hp, k, repeats_hp, data_loads)
for name, score in zip(out_names, outputs):
final_results.loc[line_counter, "name"] = name
final_results.loc[line_counter, "score"] = score
final_results.loc[line_counter, "std"] = std
line_counter += 1
final_results.to_csv("bncv_vs_ncv.csv", index=False)
def cv(data, hp, k, repeats, data_test):
max_epoch = 400
epoch_patience = 10
fcn = 256
columns = ["wd", "do", "lr"] + list(range(k))
results = pd.DataFrame(columns=columns)
test_predictions = {}
for _ in range(repeats):
lr, wd, do = sample_hp(hp)
param = [fcn, wd, do, lr]
fold_count = 0
ensembled_p = {}
results.loc[_, ["wd", "do", "lr"]] = [wd, do, lr]
for data_train, data_val in fold_generator(data, k):
model = load_model(data_train[0].shape[1], param)
es = EarlyStopping(monitor='val_loss', mode='min',
verbose=1, patience=epoch_patience)
h = model.fit(x=data_train[0], y=data_train[1],
batch_size=16,
epochs=max_epoch,
verbose=0,
validation_data=data_val,
shuffle=True,
callbacks=[es])
pred = model.predict(data_test[0])
ensembled_p[fold_count] = pred
results.loc[_, fold_count] = h.history["val_accuracy"][-1]
del model
K.clear_session()
fold_count += 1
test_predictions[_] = ensembled_p
mean_val_idx = results[list(range(k))].mean(axis=1).argmax()
# cv score
param = [fcn] + [results.loc[mean_val_idx, el] for el in ["wd", "do", "lr"]]
model = load_model(data_train[0].shape[1], param)
model.fit(x=data[0], y=data[1],
batch_size=16,
epochs=max_epoch,
verbose=0,
shuffle=True)
score = performance(model, data_test)
# bncv
validation_scores = results.loc[mean_val_idx, list(range(k))]
validation_scores = validation_scores.sort_values(ascending=False)
bncv1 = aggre(validation_scores, test_predictions[mean_val_idx], 1, data_test[1])
bncv3 = aggre(validation_scores, test_predictions[mean_val_idx], 3, data_test[1])
bncv5 = aggre(validation_scores, test_predictions[mean_val_idx], 5, data_test[1])
return score, bncv1, bncv3, bncv5
def aggre(series_scores, dic_pred_test, order, y_test):
results = np.zeros_like(dic_pred_test[0])
for i in range(order):
results += dic_pred_test[i]
results /= order
y_ens = results.argmax(axis=1)
return accuracy_score(y_test.argmax(axis=1), y_ens)
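# --- Illustrative sketch (not part of the original experiment code) ----------
# aggre() averages the test-set predictions of the first `order` folds and scores
# the ensembled argmax against the one-hot test labels. A toy example with two
# folds, three test samples and two classes (all numbers are made up):
def _demo_aggre():
    dic_pred_test = {
        0: np.array([[0.9, 0.1], [0.2, 0.8], [0.6, 0.4]]),
        1: np.array([[0.7, 0.3], [0.4, 0.6], [0.3, 0.7]]),
    }
    y_test = np.array([[1, 0], [0, 1], [1, 0]])    # one-hot test labels
    fold_val_scores = pd.Series([0.80, 0.75])      # per-fold validation accuracy (kept for the signature)
    return aggre(fold_val_scores, dic_pred_test, 2, y_test)  # accuracy of the 2-fold ensemble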
def repeat(num):
# n = 336 => 350
# NCV: 5 folds in 5 folds => test_fold ~ 350 / 5 => 70
# val fold => (350 - 70) / 5 = 56
# train fold => 224
if num == "1":
repeats = 1000
repeats_simulated_data = 1
elif num == "2":
repeats = 10
repeats_simulated_data = 10
n_train = 224
n_val = 56
n_loads = 5000
p = 256
list_std = list(range(1, 21))
name = f"repeated_experiment_sco_{num}.csv"
e_name = f"repeated_experiment_epo_{num}.csv"
    results = pd.DataFrame(columns=list_std)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 25 19:48:03 2019
@authors: enzoampil & jpdeleon
"""
# Import standard library
import os
import requests
from datetime import datetime, timedelta
import time
from pathlib import Path
from pkg_resources import resource_filename
# Import modules
import pandas as pd
import numpy as np
import lxml.html as LH
from tqdm import tqdm
import tweepy
import yfinance as yf
from nltk.sentiment.vader import SentimentIntensityAnalyzer
import nltk
from urllib.request import urlopen
from bs4 import BeautifulSoup
import ccxt
DATA_PATH = resource_filename(__name__, "data")
PSE_TWITTER_ACCOUNTS = [
"phstockexchange",
"colfinancial",
"firstmetrosec",
"BPItrade",
"Philstocks_",
"itradeph",
"UTradePH",
"wealthsec",
]
DATA_FORMAT_COLS = {
"o": "open",
"h": "high",
"l": "low",
"c": "close",
"v": "volume",
"i": "openinterest",
}
CALENDAR_FORMAT = "%Y-%m-%d"
def get_stock_table(stock_table_fp=None):
"""
Returns dataframe containing info about PSE listed stocks while also saving it
"""
if stock_table_fp is None:
stock_table_fp = Path(DATA_PATH, "stock_table.csv")
stock_table = pd.DataFrame(
columns=[
"Company Name",
"Stock Symbol",
"Sector",
"Subsector",
"Listing Date",
"company_id",
"security_id",
]
)
data = {
"pageNo": "1",
"companyId": "",
"keyword": "",
"sortType": "",
"dateSortType": "DESC",
"cmpySortType": "ASC",
"symbolSortType": "ASC",
"sector": "ALL",
"subsector": "ALL",
}
for p in range(1, 7):
print(str(p) + " out of " + str(7 - 1) + " pages", end="\r")
data["pageNo"] = str(p)
r = requests.post(
url="https://edge.pse.com.ph/companyDirectory/search.ax", data=data
)
table = LH.fromstring(r.text)
page_df = (
pd.concat(
[
pd.read_html(r.text)[0],
pd.DataFrame(
{"attr": table.xpath("//tr/td/a/@onclick")[::2]}
),
],
axis=1,
)
.assign(
company_id=lambda x: x["attr"].apply(
lambda s: s[s.index("(") + 2 : s.index(",") - 1]
)
)
.assign(
security_id=lambda x: x["attr"].apply(
lambda s: s[s.index(",") + 2 : s.index(")") - 1]
)
)
.drop(["attr"], axis=1)
)
stock_table = stock_table.append(page_df)
stock_table.to_csv(stock_table_fp, index=False)
return stock_table
def fill_gaps(df):
"""
Fills gaps of time series dataframe with NaN rows
"""
idx = pd.period_range(df.index.min(), df.index.max(), freq="D")
# idx_forecast = pd.period_range(start_datetime, end_datetime, freq="H")
ts = pd.DataFrame({"empty": [0 for i in range(idx.shape[0])]}, index=idx)
ts = ts.to_timestamp()
df_filled = pd.concat([df, ts], axis=1)
del df_filled["empty"]
return df_filled
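# --- Illustrative usage (not part of the original module) --------------------
# fill_gaps() reindexes a daily time series so that missing calendar days appear
# as NaN rows. A toy example with two missing days:
def _demo_fill_gaps():
    df = pd.DataFrame(
        {"close": [10.0, 11.5, 12.0]},
        index=pd.to_datetime(["2021-01-01", "2021-01-03", "2021-01-05"]),
    )
    return fill_gaps(df)  # rows for 2021-01-02 and 2021-01-04 are added with NaN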
def get_pse_data_old(
symbol, start_date, end_date, stock_table_fp=None, verbose=True
):
"""Returns pricing data for a specified stock.
Parameters
----------
symbol : str
Symbol of the stock in the PSE. You can refer to this link: https://www.pesobility.com/stock.
start_date : str
Starting date (YYYY-MM-DD) of the period that you want to get data on
end_date : str
Ending date (YYYY-MM-DD) of the period you want to get data on
stock_table_fp : str
File path of an existing stock table or where a newly downloaded table should be saved
Returns
-------
pandas.DataFrame
Stock data (in OHLCV format) for the specified company and date range
"""
if stock_table_fp is None:
stock_table_fp = Path(DATA_PATH, "stock_table.csv")
if stock_table_fp.exists():
stock_table = pd.read_csv(stock_table_fp)
if verbose:
print("Loaded: ", stock_table_fp)
else:
stock_table = get_stock_table(stock_table_fp=stock_table_fp)
data = {
"cmpy_id": int(
stock_table["company_id"][
stock_table["Stock Symbol"] == symbol
].values[0]
),
"security_id": int(
stock_table["security_id"][
stock_table["Stock Symbol"] == symbol
].values[0]
),
"startDate": datetime.strptime(start_date, CALENDAR_FORMAT).strftime(
"%m-%d-%Y"
),
"endDate": datetime.strptime(end_date, CALENDAR_FORMAT).strftime(
"%m-%d-%Y"
),
}
r = requests.post(
url="https://edge.pse.com.ph/common/DisclosureCht.ax", json=data
)
df = pd.DataFrame(r.json()["chartData"])
rename_dict = {
"CHART_DATE": "dt",
"OPEN": "open",
"HIGH": "high",
"LOW": "low",
"CLOSE": "close",
"VALUE": "value",
}
rename_list = ["dt", "open", "high", "low", "close", "value"]
df = df.rename(columns=rename_dict)[rename_list].drop_duplicates()
df.dt = pd.to_datetime(df.dt)
df = df.set_index("dt")
return df
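# --- Illustrative usage (not part of the original module) --------------------
# Fetching pricing data for one PSE symbol over a date range. "JFC" is only an
# example ticker; the call needs network access to the PSE Edge endpoint and
# downloads/caches the stock table on first use.
def _demo_get_pse_data_old():
    return get_pse_data_old("JFC", "2020-01-01", "2020-03-31", verbose=True)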
def process_phisix_date_dict(phisix_dict):
date = datetime.strftime(
pd.to_datetime(phisix_dict["as_of"]).date(), CALENDAR_FORMAT
)
stock_dict = phisix_dict["stock"][0]
stock_price_dict = stock_dict["price"]
name = stock_dict["name"]
currency = stock_price_dict["currency"]
closing_price = stock_price_dict["amount"]
percent_change = stock_dict["percent_change"]
volume = stock_dict["volume"]
symbol = stock_dict["symbol"]
return {
"dt": date,
"name": name,
"currency": currency,
"close": closing_price,
"percent_change": percent_change,
"volume": volume,
"symbol": symbol,
}
def get_phisix_data_by_date(symbol, date):
"""
Requests data in json format from phisix API
Note: new API endpoint is now used, with fallback to old API
"""
new_endpoint = "http://1.phisix-api.appspot.com/stocks/"
url = new_endpoint + "{}.{}.json".format(symbol, date)
res = requests.get(url)
if res.ok:
unprocessed_dict = res.json()
processed_dict = process_phisix_date_dict(unprocessed_dict)
return processed_dict
else:
# fallback to old endpoint
old_endpoint = "http://phisix-api2.appspot.com/stocks/"
url = old_endpoint + "{}.{}.json".format(symbol, date)
res = requests.get(url)
if res.ok:
unprocessed_dict = res.json()
processed_dict = process_phisix_date_dict(unprocessed_dict)
return processed_dict
else:
if res.status_code == 500:
# server error
res.raise_for_status()
else:
# non-trading day
return None
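# --- Illustrative usage (not part of the original module) --------------------
# Querying the phisix API for a single symbol on a single date. The result is a
# dict shaped by process_phisix_date_dict, or None on a non-trading day. The
# symbol and date below are examples only; the call requires network access.
def _demo_get_phisix_data_by_date():
    return get_phisix_data_by_date("JFC", "2020-01-06")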
def update_pse_data_cache(start_date="2010-01-01", verbose=True):
"""
    Downloads DOHLC data of all PSE companies using get_pse_data_old
    and saves it as a .zip in /data to be used as a cache
    NOTE: it would be useful to also add the sector as a column
"""
if verbose:
print("Updating cache...")
date_today = datetime.now().date().strftime("%Y-%m-%d")
ifp = Path(DATA_PATH, "company_names.csv")
    names = pd.read_csv(ifp)
import json
import numpy as np
from bert4keras.backend import keras, search_layer, K
from bert4keras.tokenizers import Tokenizer
from bert4keras.models import build_transformer_model
from bert4keras.optimizers import Adam
from bert4keras.snippets import sequence_padding, DataGenerator
from keras.layers import Lambda, Dense,Dropout
from tqdm import tqdm
from keras.utils import plot_model
import argparse
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('--load_model', type=int, help='1 means loading, any other value means not loading')
parser.add_argument('--epoch',type=int)
parser.add_argument('--sample_num',type=int,help='the number of the train samples')
parser.add_argument('--train_layer',type=int,help='the number of the train layers')
parser.add_argument('--en',type=str,help='true or other')
args = parser.parse_args()
config_path = '/home/hpe/project/zfy/offensive/bert/bert_config.json'
checkpoint_path = '/home/hpe/project/zfy/bert_model/multilingual_L-12_H-768_A-12/bert_model.ckpt'
dict_path = '/home/hpe/project/zfy/bert_model/multilingual_L-12_H-768_A-12/vocab.txt'
import os
import pandas as pd
root_path = '../code/examples/english/data'
file = os.path.join(root_path,'olid-training-v1.0.tsv')
df = pd.read_csv(file,sep='\t')
test = df.rename(columns={'tweet': 'text', 'subtask_a': 'labels'})
data = test[['text', 'labels']]
len_num = data.shape[0]
texts = data['text'].tolist()
labels_ = data['labels'].tolist()
labels = list(set(data['labels'].tolist()))
num_classes = len(labels)
maxlen = 120
batch_size = 64
# build the tokenizer
tokenizer = Tokenizer(dict_path, do_lower_case=True)
class LossHistory(keras.callbacks.Callback):
def on_train_begin(self, logs={}):
self.losses = {'batch':[], 'epoch':[]}
self.accuracy = {'batch':[], 'epoch':[]}
self.val_loss = {'batch':[], 'epoch':[]}
self.val_acc = {'batch':[], 'epoch':[]}
def on_batch_end(self, batch, logs={}):
self.losses['batch'].append(logs.get('loss'))
self.accuracy['batch'].append(logs.get('acc'))
self.val_loss['batch'].append(logs.get('val_loss'))
self.val_acc['batch'].append(logs.get('val_acc'))
def on_epoch_end(self, batch, logs={}):
self.losses['epoch'].append(logs.get('loss'))
self.accuracy['epoch'].append(logs.get('acc'))
self.val_loss['epoch'].append(logs.get('val_loss'))
self.val_acc['epoch'].append(logs.get('val_acc'))
# create an instance of LossHistory
history = LossHistory()
class data_generator(DataGenerator):
    """Data generator
    """
def __iter__(self, random=True):
batch_token_ids, batch_segment_ids, batch_labels = [], [], []
if len(self.data[0]) == 2:
for is_end, (text, label) in self.sample(random):
token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen)
batch_token_ids.append(token_ids)
batch_segment_ids.append(segment_ids)
batch_labels.append([label])
if len(batch_token_ids) == self.batch_size or is_end:
batch_token_ids = sequence_padding(batch_token_ids)
batch_segment_ids = sequence_padding(batch_segment_ids)
batch_labels = sequence_padding(batch_labels)
yield [batch_token_ids, batch_segment_ids], batch_labels
batch_token_ids, batch_segment_ids, batch_labels = [], [], []
if len(self.data[0]) == 3:
for is_end, (texta, textb,label) in self.sample(random):
token_ids, segment_ids = tokenizer.encode(texta, textb, maxlen=maxlen)
batch_token_ids.append(token_ids)
batch_segment_ids.append(segment_ids)
batch_labels.append([label])
if len(batch_token_ids) == self.batch_size or is_end:
batch_token_ids = sequence_padding(batch_token_ids)
batch_segment_ids = sequence_padding(batch_segment_ids)
batch_labels = sequence_padding(batch_labels)
yield [batch_token_ids, batch_segment_ids], batch_labels
batch_token_ids, batch_segment_ids, batch_labels = [], [], []
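# --- Illustrative usage (not part of the original script) --------------------
# data_generator expects a list of (text, label) pairs (or (text_a, text_b, label)
# triples) and yields padded [token_ids, segment_ids], labels batches. The two
# sentences below are made-up placeholders, and the vocabulary file at dict_path
# must exist for the tokenizer to work.
def _demo_data_generator():
    toy_data = [("this is fine", 0), ("you are terrible", 1)]
    gen = data_generator(toy_data, batch_size=2)
    for batch_inputs, batch_labels in gen:   # one pass over the toy data
        return batch_inputs, batch_labels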
def evaluate(data):
total, right = 0., 0.
for x_true, y_true in data:
y_pred = model.predict(x_true).argmax(axis=1)
y_true = y_true[:, 0]
total += len(y_true)
right += (y_true == y_pred).sum()
return right / total
from tqdm import tqdm
def index(label):
if label.upper()[:3] == 'OFF':
return labels.index('OFF')
else:
return labels.index("NOT")
# Danish data
file = '../code/examples/danish/data/offenseval-da-training-v1.tsv'
df = pd.read_csv(file, sep='\t')
# Author: <NAME>
# Email: <EMAIL>
# License: MIT License
import numpy as np
import pandas as pd
class Results:
def __init__(self, results_list, opt_pros):
self.results_list = results_list
self.opt_pros = opt_pros
self.objFunc2results = {}
self.search_id2results = {}
def _sort_results_objFunc(self, objective_function):
best_score = -np.inf
best_para = None
search_data = None
search_data_list = []
for results_ in self.results_list:
nth_process = results_["nth_process"]
opt = self.opt_pros[nth_process]
objective_function_ = opt.objective_function
search_space_ = opt.s_space()
params = list(search_space_.keys())
if objective_function_ != objective_function:
continue
if results_["best_score"] > best_score:
best_score = results_["best_score"]
best_para = results_["best_para"]
search_data = results_["search_data"]
search_data["eval_times"] = results_["eval_times"]
search_data["iter_times"] = results_["iter_times"]
search_data_list.append(search_data)
if len(search_data_list) > 0:
            search_data = pd.concat(search_data_list)
#!/usr/bin/env python3
"""Tool to convert Thermo source data in to MS2Analyte input format"""
import pandas as pd
import os
import glob
import csv
import pymzml
import xml.etree.ElementTree as ET
import ms2analyte.config as config
def orbitrap(file_path):
"""Import Orbitrap data from XCalibur export. Designed for scan by scan Orbitrap data.
Original export of example data performed by Cech lab @ UNCG. Example data in MS_data external in Cech directory
"""
headers = ["scan", "rt", "mz", "drift", "intensity"]
input_data = []
intensity_cutoff = config.intensity_cutoff
for path_name in glob.glob(os.path.join(file_path, "*.mzML.binary.*.txt")):
file_name = path_name.split("/")[-1]
scan_number = int(file_name.split(".")[-2])
with open(path_name) as f:
for row in f:
if row.startswith("# retentionTime:"):
retention_time = float(row.split(" ")[-1])
break
with open(path_name) as f:
csv_f = csv.reader(f, delimiter="\t")
for row in csv_f:
if not row[0].startswith("#"):
intensity = round(float(row[1]), 0)
mass = round(float(row[0]), 4)
if intensity >= intensity_cutoff:
input_data.append([scan_number, retention_time, mass, None, intensity])
    orbitrap_dataframe = pd.DataFrame.from_records(input_data, columns=headers, index=str)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Analize the SRAG data and export the statistics to generate the figure 1
Needs the filter_SRAG.py csv output to run
"""
import pandas as pd
import numpy as np
import datetime
import matplotlib.pyplot as plt
from scipy.stats import norm, binom
def median_estimate(X, CI):
n = len(X)
lmd = binom.ppf((1-CI)/2, n, 0.5)
mmd = binom.ppf((1+CI)/2, n, 0.5)
Xo = np.sort(X)
return np.median(Xo), Xo[int(lmd)], Xo[int(mmd)-1]
def freq_estimate(X, CI):
n = len(X)
P = (X==True).sum()
lmd = binom.ppf((1-CI)/2, n, P/n)
mmd = binom.ppf((1+CI)/2, n, P/n)
return P/n, lmd/n, mmd/n
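# --- Illustrative usage (not part of the original script) --------------------
# median_estimate returns the sample median plus an order-statistic confidence
# interval based on binomial quantiles; freq_estimate does the same for a
# proportion. The toy data below is made up.
def _demo_estimates():
    x = np.array([3, 7, 1, 9, 4, 6, 2, 8, 5, 10])
    med, med_lo, med_hi = median_estimate(x, 0.95)
    flags = np.array([True, True, False, True, False, True, True, False, True, True])
    p, p_lo, p_hi = freq_estimate(flags, 0.95)
    return (med, med_lo, med_hi), (p, p_lo, p_hi)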
def create_filter_cont(data, ycol, xcols, fname, col_extra=None, CI=0.95):
lme = norm.ppf((1-CI)/2)
mme = norm.ppf((1+CI)/2)
data = data[~pd.isna(data[ycol])]
saida = {'name': [], 'mean': [], 'CIme_L':[], 'CIme_H':[], 'median':[], \
'CImd_L':[], 'CImd_H':[]}
saida['name'].append('All')
saida['mean'].append(np.mean(data[ycol]))
saida['CIme_L'].append(np.mean(data[ycol]) + lme*np.std(data[ycol])/len(data[ycol]))
saida['CIme_H'].append(np.mean(data[ycol]) + mme*np.std(data[ycol])/len(data[ycol]))
med, cl, ch = median_estimate(data[ycol], CI)
saida['median'].append(med)
saida['CImd_L'].append(cl)
saida['CImd_H'].append(ch)
if col_extra != None:
for val_extra in data[col_extra].unique():
data_extra = data[data[col_extra]==val_extra]
saida['name'].append('All_'+str(val_extra))
saida['mean'].append(np.mean(data_extra[ycol]))
saida['CIme_L'].append(np.mean(data_extra[ycol]) + lme*np.std(data_extra[ycol])/len(data_extra[ycol]))
saida['CIme_H'].append(np.mean(data_extra[ycol]) + mme*np.std(data_extra[ycol])/len(data_extra[ycol]))
med, cl, ch = median_estimate(data_extra[ycol], CI)
saida['median'].append(med)
saida['CImd_L'].append(cl)
saida['CImd_H'].append(ch)
for xcol in xcols:
for val in data[xcol].unique():
if val is np.nan:
data_fil = data[pd.isna(data[xcol])]
else:
data_fil = data[data[xcol]==val]
data_fil = data_fil[~pd.isna(data_fil[ycol])]
saida['name'].append(str(xcol)+'_'+str(val))
saida['mean'].append(np.mean(data_fil[ycol]))
saida['CIme_L'].append(np.mean(data_fil[ycol]) + lme*np.std(data_fil[ycol])/len(data_fil[ycol]))
saida['CIme_H'].append(np.mean(data_fil[ycol]) + mme*np.std(data_fil[ycol])/len(data_fil[ycol]))
med, cl, ch = median_estimate(data_fil[ycol], CI)
saida['median'].append(med)
saida['CImd_L'].append(cl)
saida['CImd_H'].append(ch)
if col_extra != None:
for val_extra in data_fil[col_extra].unique():
data_extra = data_fil[data_fil[col_extra]==val_extra]
saida['name'].append(str(xcol)+'_'+str(val)+'_'+str(val_extra))
saida['mean'].append(np.mean(data_extra[ycol]))
saida['CIme_L'].append(np.mean(data_extra[ycol]) + lme*np.std(data_extra[ycol])/len(data_extra[ycol]))
saida['CIme_H'].append(np.mean(data_extra[ycol]) + mme*np.std(data_extra[ycol])/len(data_extra[ycol]))
med, cl, ch = median_estimate(data_extra[ycol], CI)
saida['median'].append(med)
saida['CImd_L'].append(cl)
saida['CImd_H'].append(ch)
saida = pd.DataFrame(saida)
saida.to_csv(fname, index=False)
def create_filter_binary(data, ycol, xcols, fname, CI=0.95):
lme = norm.ppf((1-CI)/2)
mme = norm.ppf((1+CI)/2)
data = data[~pd.isna(data[ycol])]
saida = {'name': [], 'mean': [], 'CIme_L':[], 'CIme_H':[]}
mea, cl, ch = freq_estimate(data[ycol], CI)
saida['name'].append('All')
saida['mean'].append(mea)
saida['CIme_L'].append(cl)
saida['CIme_H'].append(ch)
for xcol in xcols:
for val in data[xcol].unique():
if val is np.nan:
                data_fil = data[pd.isna(data[xcol])]
from textwrap import dedent
import numpy as np
import pandas as pd
import pandas.util.testing as pdt
import pytest
from .. import params
@pytest.fixture
def initial_params_dict():
return dict(alpha=1.0, beta=2.0)
@pytest.fixture
def initial_params_series():
return pd.Series([1.0, 2.0], ["alpha", "beta"])
@pytest.fixture
def basic_paramset():
p = pd.Series([1.0, 2.0], ["alpha", "beta"])
return params.ParamSet(p)
@pytest.fixture
def fixed_paramset():
p = pd.Series([1.0, 2.0], ["alpha", "beta"])
return params.ParamSet(p, ["beta"])
def test_paramset_series_initialization(initial_params_series):
p = params.ParamSet(initial_params_series)
pdt.assert_series_equal(p.params, initial_params_series)
pdt.assert_series_equal(p.free, initial_params_series)
pdt.assert_series_equal(p.fixed, pd.Series([]))
def test_paramset_dict_initialization(initial_params_dict):
p = params.ParamSet(initial_params_dict)
params_series = pd.Series(initial_params_dict)
    pdt.assert_series_equal(p.params, params_series)
import os
from numbers import Integral
import pandas as pd
import polars as pl
import pyarrow as pa
from featherstore._metadata import METADATA_FOLDER_NAME, Metadata
from featherstore._table import common
from featherstore._table import _table_utils
def table_not_exists(table_path):
table_name = table_path.rsplit('/')[-1]
if not os.path.exists(table_path):
raise FileNotFoundError(f"Table '{table_name}' doesn't exist")
def table_already_exists(table_path):
table_name = table_path.rsplit('/')[-1]
if os.path.exists(table_path):
raise OSError(f"A table with name '{table_name}' already exists")
def table_name_is_not_str(table_name):
if not isinstance(table_name, str):
raise TypeError(
f"'table_name' must be a str (is type {type(table_name)})")
def table_name_is_forbidden(table_name):
if table_name == METADATA_FOLDER_NAME:
raise ValueError(f"Table name '{METADATA_FOLDER_NAME}' is forbidden")
def df_is_not_supported_table_dtype(df):
if not isinstance(df, (pd.DataFrame, pd.Series, pl.DataFrame, pa.Table)):
raise TypeError(f"'df' must be a supported DataFrame dtype (is type {type(df)})")
def df_is_not_pandas_table(df):
if not isinstance(df, (pd.DataFrame, pd.Series)):
raise TypeError(
f"'df' must be a pd.DataFrame or pd.Series (is type {type(df)})")
def to_argument_is_not_list(to):
is_valid_col_format = isinstance(to, list)
if not is_valid_col_format:
raise TypeError(f"'to' must be of type list (is type {type(to)})")
def cols_argument_is_not_list_or_none(cols):
is_valid_col_format = isinstance(cols, (list, type(None)))
if not is_valid_col_format:
raise TypeError(f"'cols' must be either list or None (is type {type(cols)})")
def cols_argument_is_not_list_or_dict(cols):
is_valid_col_format = isinstance(cols, (list, dict))
if not is_valid_col_format:
raise TypeError(f"'cols' must be either list or dict (is type {type(cols)})")
def cols_argument_items_is_not_str(cols):
if isinstance(cols, dict):
col_elements_are_str = all(isinstance(item, str) for item in cols.keys())
else:
col_elements_are_str = all(isinstance(item, str) for item in cols)
if not col_elements_are_str:
raise TypeError("Elements in 'cols' must be of type str")
def cols_does_not_match(df, table_path):
stored_data_cols = Metadata(table_path, "table")["columns"]
has_default_index = Metadata(table_path, "table")["has_default_index"]
new_data_cols = _table_utils.get_col_names(df, has_default_index)
if sorted(new_data_cols) != sorted(stored_data_cols):
        raise ValueError("New and old columns don't match")
def cols_not_in_table(cols, table_path):
table_metadata = Metadata(table_path, 'table')
stored_cols = table_metadata["columns"]
cols = common.filter_cols_if_like_provided(cols, stored_cols)
some_cols_not_in_stored_cols = set(cols) - set(stored_cols)
if some_cols_not_in_stored_cols:
raise IndexError("Trying to access a column not found in table")
def rows_argument_is_not_supported_dtype(rows):
is_valid_row_format = isinstance(rows, (list, pd.Index, type(None)))
if not is_valid_row_format:
raise TypeError(f"'rows' must be either List, pd.Index or None (is type {type(rows)})")
def rows_argument_items_dtype_not_same_as_index(rows, table_path):
index_dtype = Metadata(table_path, "table")["index_dtype"]
if rows is not None and not _rows_dtype_matches_index(rows, index_dtype):
raise TypeError("'rows' dtype doesn't match table index dtype")
def _rows_dtype_matches_index(rows, index_dtype):
row = rows[-1]
matches_dtime_idx = _check_if_row_and_index_is_temporal(row, index_dtype)
matches_str_idx = _check_if_row_and_index_is_str(row, index_dtype)
matches_int_idx = _check_if_row_and_index_is_int(row, index_dtype)
row_type_matches_idx = matches_dtime_idx or matches_str_idx or matches_int_idx
return row_type_matches_idx
def _check_if_row_and_index_is_temporal(row, index_dtype):
if _table_utils.str_is_temporal_dtype(index_dtype):
return _isinstance_temporal(row)
return False
def _check_if_row_and_index_is_str(row, index_dtype):
if _table_utils.str_is_string_dtype(index_dtype):
return _isinstance_str(row)
return False
def _check_if_row_and_index_is_int(row, index_dtype):
if _table_utils.str_is_int_dtype(index_dtype):
return _isinstance_int(row)
return False
def _isinstance_temporal(obj):
try:
_ = pd.to_datetime(obj)
is_temporal = True
except Exception:
is_temporal = False
return is_temporal
def _isinstance_str(obj):
try:
is_str = pa.types.is_string(obj) or pa.types.is_large_string(obj)
except AttributeError:
is_str = isinstance(obj, str)
return is_str
def _isinstance_int(obj):
try:
is_int = pa.types.is_integer(obj)
except AttributeError:
is_int = isinstance(obj, Integral)
return is_int
def index_dtype_not_same_as_stored_index(df, table_path):
if isinstance(df, (pd.DataFrame, pd.Series)):
index_type = str(pa.Array.from_pandas(df.index).type)
stored_index_type = Metadata(table_path, "table")["index_dtype"]
if index_type != stored_index_type:
raise TypeError("New and old index types do not match")
def col_names_contains_duplicates(cols):
cols = pd.Index(cols)
if cols.has_duplicates:
raise IndexError("Column names must be unique")
def col_names_are_forbidden(cols):
    cols = pd.Index(cols)
# Copyright (c) 2019-2021 - for information on the respective copyright owner
# see the NOTICE file and/or the repository
# https://github.com/boschresearch/pylife
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = "<NAME>"
__maintainer__ = "<NAME>"
import pandas as pd
import pylife.mesh.meshsignal as meshsignal
@pd.api.extensions.register_dataframe_accessor('hotspot')
"""Console script for koapy."""
import os
import click
from koapy.cli.commands.disable import disable
from koapy.cli.commands.enable import enable
from koapy.cli.commands.generate import generate
from koapy.cli.commands.get import get
from koapy.cli.commands.install import install
from koapy.cli.commands.serve import serve
from koapy.cli.commands.show import show
from koapy.cli.commands.uninstall import uninstall
from koapy.cli.commands.update import update
from koapy.cli.utils.credentials import get_credentials
from koapy.cli.utils.fail_with_usage import fail_with_usage
from koapy.cli.utils.verbose_option import verbose_option
from koapy.config import default_encoding
from koapy.utils.logging import get_logger
logger = get_logger(__name__)
help_option_names = ["-h", "--help"]
context_settings = dict(
help_option_names=help_option_names,
)
@click.group(context_settings=context_settings)
@click.version_option(message="%(version)s")
def cli():
pass
cli.add_command(disable)
cli.add_command(enable)
cli.add_command(generate)
cli.add_command(get)
cli.add_command(install)
cli.add_command(serve)
cli.add_command(show)
cli.add_command(uninstall)
cli.add_command(update)
@cli.command(short_help="Ensure logged in when server is up.")
@click.option(
"-i",
"--interactive",
is_flag=True,
help="Put login information with prompts. Disables auto login for manual login.",
)
@click.option(
"-d",
"--disable-auto-login",
is_flag=True,
help="Disable auto login and use credentials given explicitly from config file.",
)
@click.option(
"-p", "--port", metavar="PORT", help="Port number of grpc server (optional)."
)
@verbose_option()
def login(interactive, disable_auto_login, port):
credentials = get_credentials(interactive)
from koapy.backend.kiwoom_open_api_plus.grpc.KiwoomOpenApiPlusServiceClient import (
KiwoomOpenApiPlusServiceClient,
)
with KiwoomOpenApiPlusServiceClient(port=port, check_timeout=1) as context:
state = context.GetConnectState()
if state == 0:
click.echo("Logging in...")
else:
click.echo("Already logged in.")
if context.IsAutoLoginEnabled():
if not disable_auto_login:
credentials = None
context.EnsureConnected(credentials)
gubun = context.GetServerGubun()
if gubun == "1":
click.echo("Logged into Simulation server.")
else:
click.echo("Logged into Real server.")
@cli.command(short_help="Watch realtime data.")
@click.option(
"-c",
"--code",
"codes",
metavar="CODE",
multiple=True,
help="Stock code to get. Can set multiple times.",
)
@click.option(
"-i",
"--input",
metavar="FILENAME",
type=click.Path(),
help="Text or excel file containing codes. Alternative to --code option.",
)
@click.option(
"-f",
"--fid",
"fids",
metavar="FID",
multiple=True,
help="FID to get. Can set multiple times.",
)
@click.option(
"-t", "--realtype", metavar="REALTYPE", help="Real type name. Alternative to --fid."
)
@click.option(
"-o",
"--output",
metavar="FILENAME",
type=click.File("w", lazy=True),
default="-",
help="Output filename (optional).",
)
@click.option(
"-f",
"--format",
metavar="FORMAT",
type=click.Choice(["json", "md"], case_sensitive=False),
default="json",
help="Output format [json|md].",
)
@click.option(
"-p", "--port", metavar="PORT", help="Port number of grpc server (optional)."
)
@verbose_option()
def watch(codes, input, fids, realtype, output, format, port):
# pylint: disable=redefined-builtin
if (codes, fids, realtype) == (tuple(), tuple(), None):
fail_with_usage()
codes_len = len(codes)
if codes_len == 0:
if input is None:
fail_with_usage("Either code or input should be given.")
if not os.path.exists(input):
fail_with_usage("Given input does not exist.")
if os.path.isfile(input):
if input.endswith(".xlsx"):
import pandas as pd
df = pd.read_excel(input, dtype=str)
code_column = "종목코드"
if code_column in df:
codes = df[code_column]
else:
codes = df.iloc[0]
codes_len = len(codes)
elif input.endswith(".txt"):
with open(input, "r", encoding=default_encoding) as f:
codes = [line.strip() for line in f]
codes_len = len(codes)
else:
fail_with_usage("Unrecognized input type.")
else:
fail_with_usage("Unrecognized input type.")
if realtype is not None:
from koapy.backend.kiwoom_open_api_plus.core.KiwoomOpenApiPlusRealType import (
KiwoomOpenApiPlusRealType,
)
fids_from_realtype = KiwoomOpenApiPlusRealType.get_fids_by_realtype_name(
realtype
)
fids = list(set(fids).union(set(fids_from_realtype)))
if not codes:
fail_with_usage("No codes to watch. Set --code or --input.")
if not fids:
fail_with_usage("Cannot infer fids to watch. Set either --fid or --realtype.")
import datetime
import pandas as pd
from koapy.backend.kiwoom_open_api_plus.core.KiwoomOpenApiPlusEntrypoint import (
KiwoomOpenApiPlusEntrypoint,
)
from koapy.backend.kiwoom_open_api_plus.core.KiwoomOpenApiPlusRealType import (
KiwoomOpenApiPlusRealType,
)
def parse_message(message):
fids = message.single_data.names
names = [
KiwoomOpenApiPlusRealType.Fid.get_name_by_fid(fid, str(fid)) for fid in fids
]
values = message.single_data.values
dic = {
name: value for fid, name, value in zip(fids, names, values) if name != fid
}
        series = pd.Series(dic)
import collections
import itertools
import traceback
from pathlib import Path
import bs4
import numpy as np
import requests
import json
import urllib.request, urllib.error
import PIL
import pytesseract
import pandas as pd
import multiprocessing as mp
Msg = collections.namedtuple("Msg", ["event", "data"])
class ProcessManager:
def __init__(self, pyqt_signal: dict):
self.processes = {}
self.queue = mp.Queue()
self.pyqt_signal = pyqt_signal
@staticmethod
def _wrapper(func, pid, queue, args, kwargs):
func(*args, pid=pid, queue=queue, **kwargs) # function execution
@staticmethod
def _chunks(iterable, size):
"""Generate adjacent chunks of data"""
it = iter(iterable)
return iter(lambda: tuple(itertools.islice(it, size)), ())
def run(self, pid, func, *args, **kwargs):
"""Start processes individually with user-managed resources."""
args2 = (func, pid, self.queue, args, kwargs)
proc = mp.Process(target=self._wrapper, args=args2)
self.processes[pid] = {"pid": pid, "process": proc, "terminated": False} # saving processes in a dict
self.processes[pid]["process"].start()
def map(self, func, work, *args, max_processes=mp.cpu_count(), **kwargs):
"""Map a function onto multiple processes, with this class managing the input."""
work_list = self._chunks(work, max_processes) # dividing work into smaller chunks
for pid, work in enumerate(work_list):
kwargs["work"] = work
args2 = (func, pid, self.queue, args, kwargs)
proc = mp.Process(target=self._wrapper, args=args2)
self.processes[pid] = {"pid": pid, "process": proc, "terminated": False}
self.processes[pid]["process"].start()
def wait(self): # waiting for processes to finish work, while updating the GUI
return_list = []
terminated = False
while not terminated:
for _ in self.processes:
event, data = self.queue.get()
if event == "return_data": # event conditionals
return_list.append(data)
elif event == "pyqt_signal":
self.pyqt_signal[data[0]].emit(data[1]) # can emit whatever PyQt signal depending on a list.
elif event == "proc_terminate":
self.processes[data]["process"].join() # process is terminated
self.processes[data]["terminated"] = True
if all([self.processes[pid]["terminated"] for pid in self.processes]):
terminated = True
break
return return_list
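# --- Illustrative sketch (not part of the original module) -------------------
# Minimal usage of ProcessManager.map/wait with a worker that follows the
# expected contract: accept pid/queue plus a `work` kwarg, push results with a
# "return_data" message and finish with "proc_terminate". No PyQt signals are
# emitted here, so an empty signal dict suffices.
def _demo_square_worker(pid, queue, work=None):
    squares = {n: n * n for n in work}
    queue.put(Msg("return_data", squares))
    queue.put(Msg("proc_terminate", pid))

def _demo_process_manager():
    manager = ProcessManager(pyqt_signal={})
    manager.map(_demo_square_worker, list(range(8)), max_processes=2)
    return manager.wait()  # list of per-process result dicts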
def f_type_return(file, file_type_list: list): # takes string and file type list as input.
for f_type in file_type_list:
if str(file).lower().endswith(f_type):
return str(f_type)
def remove_files_in_directory(directory: Path, file_types: list):
files = [_ for _ in directory.glob("*") if f_type_return(_, file_types) in file_types]
for file_path in files:
file_path.unlink()
def get_id(temp_url: str):
found = False
first_index = temp_url.find("properties") + len("properties") + 1
index = first_index
while not found:
if not temp_url[index].isnumeric():
return temp_url[first_index:index]
else:
index += 1
if index > 100:
return 0
def bs_pull_links(df, image_link_format, pyqt_signal_dict):
pyqt_signal_dict["text_log"].emit("Starting to pull html data from links...")
for idx in range(len(df.index)):
temp_url = df.at[idx, "url"]
image_page = image_link_format % str(get_id(temp_url))
page = requests.get(image_page)
soup = bs4.BeautifulSoup(page.content, 'lxml')
script = soup.find_all("script")
fp_link = "Non-standard html: couldn't find window.PAGE_MODEL"
for s in script:
s = str(s)
if "window.PAGE_MODEL = " in s:
idx_1 = len("<script> window.PAGE_MODEL = ")
idx_2 = len(s) - len("</script>")
json_string = s[idx_1:idx_2]
json_object = json.loads(json_string)
# with open(Path(Path.cwd(), "floorplans", str(idx) + ".json"), "w") as json_file:
# json.dump(json_object, json_file, indent=4)
try:
if json_object["analyticsInfo"]["analyticsProperty"]["floorplanCount"] != 0:
fp_link = json_object["propertyData"]["floorplans"][0]["url"]
else:
fp_link = "No plan"
break
except KeyError:
fp_link = "No plan" # Occasionally bs4 doesn't return an analyticsInfo, rerunning it should work.
df.at[idx, "image url"] = fp_link
pyqt_signal_dict["progress_bar"].emit(round(100 * idx / len(df.index)))
def assign_titles(df, title_list):
for position, title, value in title_list:
df.insert(position, title, value)
# reminder: variable value will fill the column with itself, and the column will
# allow only one type. BE CAREFUL.
def download_images(df, image_folder: Path, image_types: list, pyqt_signal_dict):
pyqt_signal_dict["text_log"].emit("Downloading images...")
for idx in range(len(df.index)):
if df.at[idx, "image url"] != "No plan":
try:
urllib.request.urlretrieve(df.at[idx, "image url"],
Path(image_folder,
str(idx) + f_type_return(df.at[idx, "image url"].lower(),
image_types)).absolute())
df.at[idx, "filename"] = str(idx) + f_type_return(df.at[idx, "image url"].lower(), image_types)
except urllib.error.HTTPError:
df.at[idx, "Area (sqft)"] = "HTTP Retrieval Error"
except TypeError:
print(f"{df.at[idx, 'image url']}, {f_type_return(df.at[idx, 'image url'].lower(), image_types)}, "
f"{type(f_type_return(df.at[idx, 'image url'].lower(), image_types))}, type error")
pyqt_signal_dict["progress_bar"].emit(round(100 * idx / len(df.index)))
pyqt_signal_dict["text_log"].emit("Done!")
def process_images(df, image_folder: Path, image_types: list, pyqt_signal_dict):
pyqt_signal_dict["text_log"].emit("Processing images...")
for idx in range(len(df.index)):
if df.at[idx, "image url"] != "No plan":
try:
image = PIL.Image.open(Path(image_folder, df.at[idx, "filename"])).convert("L")
nx, ny = image.size
if int(nx) < 900 or int(ny) < 900:
image = image.resize((int(nx * 2), int(ny * 2)), PIL.Image.LANCZOS)
image.save(Path(image_folder, str(idx) + ".jpg"))
image.close()
if f_type_return(df.at[idx, "filename"], image_types) != ".jpg":
Path(image_folder, df.at[idx, "filename"]).unlink()
df.at[idx, "filename"] = str(idx) + ".jpg"
except Exception:
traceback.print_exc()
pyqt_signal_dict["progress_bar"].emit(round(100 * idx / len(df.index)))
pyqt_signal_dict["text_log"].emit("Done!")
def images_to_text(df: pd.DataFrame, image_folder: Path, pyqt_signal_dict):
pyqt_signal_dict["text_log"].emit("Converting images to text...")
image_list = list()
for idx in range(len(df.index)):
if df.at[idx, "image url"] != "No plan":
image_list.append((idx, df.at[idx, "filename"]))
proc_manager = ProcessManager(pyqt_signal_dict)
proc_manager.map(_images_to_text, image_list, image_folder)
list_of_dicts = proc_manager.wait()
pyqt_signal_dict["text_log"].emit("Done!")
return merge_dicts(list_of_dicts)
def _images_to_text(image_folder: Path, pid, queue, work=None):
"""
    Any function passed to ProcessManager.map must accept pid and queue keyword arguments plus a `work` keyword
    argument. Additional positional arguments may precede pid and queue, and other keyword arguments may appear
    in any order, as long as the work kwarg exists.
"""
pytesseract.pytesseract.tesseract_cmd = r'C:\\Program Files\\Tesseract-OCR\\tesseract.exe'
image_list = work
object_dict = dict()
for idx, filename in image_list:
try:
image = PIL.Image.open(Path(image_folder, filename))
text = pytesseract.image_to_string(image)
image.close()
object_dict[idx] = {"filename": filename, "text": text.lower()}
except PermissionError:
print(f"Permission error, filename: {filename}")
except Exception:
traceback.print_exc()
queue.put(Msg("pyqt_signal", ["progress_bar", round(100 * idx / len(image_list))]))
queue.put(Msg("return_data", object_dict))
queue.put(Msg("proc_terminate", pid))
def merge_dicts(dict_args):
"""
Given any number of dictionaries, shallow copy and merge into a new dict,
precedence goes to key-value pairs in latter dictionaries. Python 3.9+
"""
result = {}
for dictionary in dict_args:
result = result | dictionary
return result
def find_number(text: str):
_ = 0
while True:
if text[_].isnumeric():
first_idx = _
break
else:
_ += 1
if _ > len(text) - 1:
first_idx = 0
break
_ = first_idx
dot_count = 0
while True:
if text[_] == ".":
dot_count += 1
if dot_count > 1:
if text[_ - 1] == ".": # in the case of 123..
second_idx = _ - 2
else: # in the case of 1.23.
second_idx = _ - 1
break
if not text[_].isnumeric() and text[_] != ".":
second_idx = _
break
else:
_ += 1
if _ > len(text) - 1:
second_idx = 0
break
return first_idx, second_idx
def find_number_reverse(text: str, start=None):
_ = len(text) - 1 if start is None else start
while True:
if text[_].isnumeric():
second_idx = _ + 1
break
else:
_ -= 1
if _ < 0:
second_idx = 0
break
dot_count = 0
while True:
if text[_] == ".":
dot_count += 1
if dot_count > 1:
if text[_ + 1] == ".":
first_idx = _ + 2 # in the case of ..123
else:
first_idx = _ + 1 # in the case of .1.23
break
if not text[_].isnumeric() and text[_] != ".":
first_idx = _ + 1
break
else:
_ -= 1
if _ < 0:
first_idx = 0
break
return first_idx, second_idx
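# --- Illustrative usage (not part of the original module) --------------------
# find_number scans forward for the first number in a string and returns its
# (start, end) slice indices; find_number_reverse scans backwards, optionally
# starting from a given position (e.g. where a unit such as "sqft" was found).
def _demo_find_number():
    text = "totalarea123.5sqft"
    i, j = find_number(text)                                   # slices out "123.5"
    k, m = find_number_reverse(text, start=text.find("sqft"))  # same number, scanned backwards
    return text[i:j], text[k:m]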
def clean_text(text, replace_set, allowed_char_set):
for _ in text: # removing any unrelated text
if _ not in allowed_char_set and not _.isnumeric():
replace_set.add(_)
for string in replace_set: # removing extra fluff from the text
text = text.replace(string, "")
return text
def find_area(df, image_dict: dict, keywords, pyqt_signal_dict: dict): # The bulk of the text recognition logic.
# a bona-fide nightmare.
pyqt_signal_dict["text_log"].emit("Processing text...")
unit_list = ["sqft", "sq.ft", "sqm", "sq.m", "ft", "m2"]
sqft_unit_list = ["sqft", "sq.ft", "ft"]
sqm_unit_list = ["sqm", "sq.m", "m2"]
replace_set = {"\n", " ", ":", "(approx.)", "approx"}
allowed_character_set = {"s", "q", "m", "f", "t", "."}
for idx in image_dict:
area_text = str() # reset area_text for the next loop.
for kw in keywords: # First stage recognition. If np.nan is still in the ws cell, then move on to second stage
# recognition logic.
if image_dict[idx]["text"].find(kw) != -1:
find_idx = image_dict[idx]["text"].find(kw)
if len(image_dict[idx]["text"]) < find_idx + len(kw) + 60:
area_text = image_dict[idx]["text"][find_idx + len(kw):]
else:
area_text = image_dict[idx]["text"][find_idx + len(kw):find_idx + len(kw) + 59]
area_text = clean_text(area_text, replace_set, allowed_character_set)
image_dict[idx]["text"] = area_text
if not any(map(str.isnumeric, area_text)):
break
s1_idx_1, s1_idx_2 = find_number(area_text) # stage 1 index 1 & 2: s1_idx_1/2
for unit in unit_list:
if area_text[s1_idx_2:s1_idx_2 + len(unit)] == unit:
area = np.nan
try:
area = float(area_text[s1_idx_1:s1_idx_2])
except ValueError:
print(f"Value error, ({s1_idx_1}:{s1_idx_2}): {area_text}\n{area_text[s1_idx_1:s1_idx_2]}")
if unit in ["sq.m", "sqm", "m2"]:
if area > 200: # if unusually big house, find the next number which is a sqft num
s1_2_idx_1, s1_2_idx_2 = find_number(area_text[s1_idx_2 + 1:])
area = float(area_text[s1_idx_2 + 1 + s1_2_idx_1:s1_idx_2 + 1 + s1_2_idx_2])
image_dict[idx]["exit code"] = "sqm > 200, stage 1"
# setting var area knowing this is a sqft value, since the other one is sqm.
# reverse can be done, ie if area > 2000 sqft, find sqm backup measurement.
else:
image_dict[idx]["exit code"] = "sqft, stage 1, no complications."
area = round(10.76 * float(area), 1) # conversion to sqft
df.at[idx, "Area (sqft)"] = area
break
break
if len(area_text) == 0: # if there are no kw in the text, assign the whole area_text from image_dict.
area_text = image_dict[idx]["text"]
if np.isnan(df.at[idx, "Area (sqft)"]) and any(map(str.isnumeric, area_text)):
# second stage, where we have to try identify the area via unit
# key words / str.find().
area_text = clean_text(area_text, replace_set, allowed_character_set)
image_dict[idx]["text"] = area_text
for unit in unit_list:
if area_text.find(unit) != -1:
s2_idx_1, s2_idx_2 = find_number_reverse(area_text, start=area_text.find(unit))
area = np.nan
try:
area = float(area_text[s2_idx_1:s2_idx_2])
except ValueError:
print(f"Value error, ({s2_idx_1}:{s2_idx_2}): {area_text}\n{area_text[s2_idx_1:s2_idx_2]}")
if unit in ["sq.m", "sqm", "m2"]:
area = round(10.76 * float(area), 1)
image_dict[idx]["exit code"] = "sqft, stage 2, no complications."
if area > 2000 or area < 1:
if unit in ["sq.m", "sqm", "m2"]: # in the case that area is larger than 2000sqft.
for unit_2 in sqft_unit_list:
if area_text.find(unit_2) != -1:
s2_idx_1, s2_idx_2 = find_number_reverse(area_text, start=area_text.find(unit_2))
area = float(area_text[s2_idx_1:s2_idx_2])
image_dict[idx]["exit code"] = "sqm > 200, stage 2"
break
else:
for unit_2 in sqm_unit_list:
if area_text.find(unit_2) != -1:
s2_idx_1, s2_idx_2 = find_number_reverse(area_text, start=area_text.find(unit_2))
area = float(area_text[s2_idx_1:s2_idx_2])
area = round(10.76 * float(area), 1)
image_dict[idx]["exit code"] = "sqft > 2000, stage 2"
break
df.at[idx, "Area (sqft)"] = area
break
image_dict[idx]["located area"] = df.at[idx, "Area (sqft)"]
with open(Path(Path.cwd(), "floorplans", "image_dict.json"), "w") as json_file:
json.dump(image_dict, json_file, indent=4)
pyqt_signal_dict["text_log"].emit("Done!")
def process_data(df, colour_dict, pyqt_signal_dict):
count, no_plan_count = 0, 0
for idx in range(len(df.index)):
if not np.isnan(df.at[idx, "Area (sqft)"]): # check yield
count += 1
if df.at[idx, "image url"] == "No plan": # yield takes no-plans into account, subtracts at the end.
no_plan_count += 1
value, area = df.at[idx, "price"], df.at[idx, "Area (sqft)"] # calculate ppsqft
        if not pd.isnull(value):
import openpyxl
import pandas as pd
from datetime import datetime, timedelta
import xlsxwriter
now = datetime.now()
date_time = now.strftime("%m_%d_%Y %I_%M_%p")
federal_tax_rate_path = "./federaltaxrates.csv"
state_tax_rate_path = "./statetaxrates.csv"
city_tax_rate_path = "./NYCtaxrates.csv"
# calculate social security tax
class EffectiveFederalTax:
def __init__(self, salary, marital_status):
self.salary = salary
self.marital_status = marital_status
def calculateSocialSecurityTaxDue(self):
if self.salary >= 147000:
return 9114
else:
return round(self.salary * 0.062, 2)
# calculate federal income tax + remainder of fica (medicare) for single filers
class EffectiveFederalTaxSingle(EffectiveFederalTax):
def __init__(self, salary, deductions):
super().__init__(salary, "single")
self.deductions = deductions
def calculateFederalIncomeTaxDue(self):
federal_tax_rate_table = pd.read_csv(federal_tax_rate_path)
federal_tax_bracket_tier = 0
single_income_column = federal_tax_rate_table.columns.get_loc("Single Income")
single_income_percentage_tax_column = federal_tax_rate_table.columns.get_loc("Single Tax Rate")
max_index = len(list(federal_tax_rate_table.index)) - 1
while federal_tax_bracket_tier <= max_index and \
int(federal_tax_rate_table.iloc[federal_tax_bracket_tier, single_income_column]) < \
(self.salary - self.deductions):
federal_tax_bracket_tier += 1
federal_tax_bracket_tier -= 1
federal_tax_due = 0
counter = 0
while counter <= federal_tax_bracket_tier - 1:
federal_tax_due += (federal_tax_rate_table.iloc[counter + 1, single_income_column]
- federal_tax_rate_table.iloc[counter, single_income_column])\
* (float((federal_tax_rate_table.iloc[counter, single_income_percentage_tax_column])
.strip("%")) / 100)
counter += 1
marginal_tax_due = (self.salary - self.deductions - federal_tax_rate_table.iloc[federal_tax_bracket_tier,
single_income_column]) \
* (float((federal_tax_rate_table.iloc[federal_tax_bracket_tier,
single_income_percentage_tax_column]).strip("%")) / 100)
federal_tax_due += marginal_tax_due
return round(federal_tax_due, 2)
def calculateMedicareTaxDue(self):
if self.salary <= 200000:
return round(self.salary * 0.0145, 2)
else:
return round(self.salary * 0.0145 + (self.salary - 200000) * 0.009, 2)
def calculateTotalFederalTaxesDue(self):
return self.calculateSocialSecurityTaxDue() + self.calculateFederalIncomeTaxDue() \
+ self.calculateMedicareTaxDue()
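# --- Illustrative usage (not part of the original script) --------------------
# A quick sanity check of the single-filer computation: income tax is accrued
# bracket by bracket from federaltaxrates.csv, then Social Security (6.2%,
# capped once salary reaches $147k) and Medicare (1.45% plus 0.9% above $200k)
# are added on top. The salary and deduction figures below are arbitrary
# examples, and the rate CSV must be present at federal_tax_rate_path.
def _demo_single_filer():
    filer = EffectiveFederalTaxSingle(salary=85000, deductions=12950)
    return {
        "income_tax": filer.calculateFederalIncomeTaxDue(),
        "social_security": filer.calculateSocialSecurityTaxDue(),
        "medicare": filer.calculateMedicareTaxDue(),
        "total": filer.calculateTotalFederalTaxesDue(),
    }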
# calculate federal income tax + remainder of fica (medicare) for married filers
class EffectiveFederalTaxMarried(EffectiveFederalTax):
def __init__(self, salary, deductions):
super().__init__(salary, "Married")
self.deductions = deductions
def calculateFederalIncomeTaxDue(self):
federal_tax_rate_table = pd.read_csv(federal_tax_rate_path)
federal_tax_bracket_tier = 0
married_income_column = federal_tax_rate_table.columns.get_loc("Married Income")
married_income_percentage_tax_column = federal_tax_rate_table.columns.get_loc("Married Tax Rate")
max_index = len(list(federal_tax_rate_table.index)) - 1
while federal_tax_bracket_tier <= max_index and \
int(federal_tax_rate_table.iloc[federal_tax_bracket_tier, married_income_column]) \
< (self.salary - self.deductions):
federal_tax_bracket_tier += 1
federal_tax_bracket_tier -= 1
federal_tax_due = 0
counter = 0
while counter <= federal_tax_bracket_tier - 1:
federal_tax_due += (federal_tax_rate_table.iloc[counter + 1, married_income_column]
- federal_tax_rate_table.iloc[counter, married_income_column])\
* (float((federal_tax_rate_table.iloc[counter, married_income_percentage_tax_column])
.strip("%")) / 100)
counter += 1
marginal_tax_due = (self.salary - self.deductions - federal_tax_rate_table.iloc[federal_tax_bracket_tier,
married_income_column])\
* (float((federal_tax_rate_table.iloc[federal_tax_bracket_tier,
married_income_percentage_tax_column]).strip("%")) / 100)
federal_tax_due += marginal_tax_due
return round(federal_tax_due, 2)
def calculateMedicareTaxDue(self):
if self.salary <= 250000:
return round(self.salary * 0.0145, 2)
else:
return round(self.salary * 0.0145 + (self.salary - 250000) * 0.009, 2)
def calculateTotalFederalTaxesDue(self):
return self.calculateSocialSecurityTaxDue() + self.calculateFederalIncomeTaxDue() \
+ self.calculateMedicareTaxDue()
class EffectiveStateTax:
def __init__(self, salary, state, marital_status):
self.salary = salary
self.state = state
self.marital_status = marital_status
# calculate state income tax for single filers
class EffectiveStateTaxSingle(EffectiveStateTax):
def __init__(self, salary, state, deductions):
super().__init__(salary, state, "single")
self.deductions = deductions
def calculateStateIncomeTaxDue(self):
state_tax_rate_table = pd.read_csv(state_tax_rate_path)
my_state_tax_rate_table = state_tax_rate_table.loc[state_tax_rate_table["State"] == str(self.state)]
single_income_column = my_state_tax_rate_table.columns.get_loc("Single Filer Brackets")
single_income_percentage_tax_column = my_state_tax_rate_table.columns.get_loc("Single Filer Rates")
max_index = my_state_tax_rate_table["Single Filer Rates"].notnull().sum() - 1
if my_state_tax_rate_table.iloc[max_index, single_income_percentage_tax_column] == "none":
return 0
state_tax_bracket_tier = 0
while state_tax_bracket_tier <= max_index and \
int(my_state_tax_rate_table.iloc[state_tax_bracket_tier, single_income_column]) \
< (self.salary - self.deductions):
state_tax_bracket_tier += 1
state_tax_bracket_tier -= 1
state_tax_due = 0
counter = 0
while counter <= state_tax_bracket_tier - 1:
state_tax_due += (my_state_tax_rate_table.iloc[counter + 1, single_income_column]
- my_state_tax_rate_table.iloc[counter, single_income_column])\
* (float((my_state_tax_rate_table.iloc[counter, single_income_percentage_tax_column])
.strip("%")) / 100)
counter += 1
marginal_tax_due = (self.salary - self.deductions - my_state_tax_rate_table.iloc[state_tax_bracket_tier,
single_income_column])\
* (float((my_state_tax_rate_table.iloc[state_tax_bracket_tier,
single_income_percentage_tax_column]).strip("%")) / 100)
state_tax_due += marginal_tax_due
return (round(state_tax_due, 2))
# calculate state income tax for married filers
class EffectiveStateTaxMarried(EffectiveStateTax):
def __init__(self, salary, state, deductions):
super().__init__(salary, state, "married")
self.deductions = deductions
def calculateStateIncomeTaxDue(self):
state_tax_rate_table = pd.read_csv(state_tax_rate_path)
my_state_tax_rate_table = state_tax_rate_table.loc[state_tax_rate_table["State"] == str(self.state)]
married_income_column = my_state_tax_rate_table.columns.get_loc("Married Filing Jointly Brackets")
married_income_percentage_tax_column = my_state_tax_rate_table.columns.get_loc("Married Filing Jointly Rates")
max_index = my_state_tax_rate_table["Married Filing Jointly Rates"].notnull().sum() - 1
if my_state_tax_rate_table.iloc[max_index, married_income_percentage_tax_column] == "none":
return 0
state_tax_bracket_tier = 0
while state_tax_bracket_tier <= max_index and \
int(my_state_tax_rate_table.iloc[state_tax_bracket_tier, married_income_column]) \
< (self.salary - self.deductions):
state_tax_bracket_tier += 1
state_tax_bracket_tier -= 1
state_tax_due = 0
counter = 0
while counter <= state_tax_bracket_tier - 1:
state_tax_due += (my_state_tax_rate_table.iloc[counter + 1, married_income_column]
- my_state_tax_rate_table.iloc[counter, married_income_column])\
* (float((my_state_tax_rate_table.iloc[counter, married_income_percentage_tax_column])
.strip("%")) / 100)
counter += 1
marginal_tax_due = (self.salary - self.deductions - my_state_tax_rate_table.iloc[state_tax_bracket_tier,
married_income_column])\
* (float((my_state_tax_rate_table.iloc[state_tax_bracket_tier,
married_income_percentage_tax_column]).strip("%")) / 100)
state_tax_due += marginal_tax_due
return (round(state_tax_due, 2))
class EffectiveCityTax:
def __init__(self, salary, city, marital_status):
self.salary = salary
self.city = city
self.marital_status = marital_status
# calculate city income tax for single filers
class EffectiveCityTaxSingle(EffectiveCityTax):
def __init__(self, salary, city, deductions):
super().__init__(salary, city, "single")
self.deductions = deductions
def calculateCityIncomeTaxDue(self):
city_tax_rate_table = pd.read_csv(city_tax_rate_path)
city_tax_bracket_tier = 0
single_income_column = city_tax_rate_table.columns.get_loc("Single Income")
single_income_percentage_tax_column = city_tax_rate_table.columns.get_loc("Single Tax Rate")
max_index = len(list(city_tax_rate_table.index)) - 1
while city_tax_bracket_tier <= max_index and \
int(city_tax_rate_table.iloc[city_tax_bracket_tier, single_income_column]) < (self.salary - self.deductions):
city_tax_bracket_tier += 1
city_tax_bracket_tier -= 1
city_tax_due = 0
counter = 0
while counter <= city_tax_bracket_tier - 1:
city_tax_due += (city_tax_rate_table.iloc[counter + 1, single_income_column]
- city_tax_rate_table.iloc[counter, single_income_column]) \
* (float((city_tax_rate_table.iloc[counter, single_income_percentage_tax_column])
.strip("%")) / 100)
counter += 1
marginal_tax_due = (self.salary - self.deductions - city_tax_rate_table.iloc[city_tax_bracket_tier,
single_income_column]) \
* (float((city_tax_rate_table.iloc[city_tax_bracket_tier,
single_income_percentage_tax_column]).strip("%")) / 100)
city_tax_due += marginal_tax_due
return round(city_tax_due, 2)
# calculate city income tax for married filers
class EffectiveCityTaxMarried(EffectiveCityTax):
def __init__(self, salary, city, deductions):
super().__init__(salary, city, "married")
self.deductions = deductions
def calculateCityIncomeTaxDue(self):
city_tax_rate_table = pd.read_csv(city_tax_rate_path)
city_tax_bracket_tier = 0
married_income_column = city_tax_rate_table.columns.get_loc("Married Income")
married_income_percentage_tax_column = city_tax_rate_table.columns.get_loc("Married Tax Rate")
max_index = len(list(city_tax_rate_table.index)) - 1
while city_tax_bracket_tier <= max_index and \
int(city_tax_rate_table.iloc[city_tax_bracket_tier, married_income_column]) < (self.salary - self.deductions):
city_tax_bracket_tier += 1
city_tax_bracket_tier -= 1
city_tax_due = 0
counter = 0
while counter <= city_tax_bracket_tier - 1:
city_tax_due += (city_tax_rate_table.iloc[counter + 1, married_income_column]
- city_tax_rate_table.iloc[counter, married_income_column]) \
* (float((city_tax_rate_table.iloc[counter, married_income_percentage_tax_column])
.strip("%")) / 100)
counter += 1
marginal_tax_due = (self.salary - self.deductions - city_tax_rate_table.iloc[city_tax_bracket_tier,
married_income_column]) \
* (float((city_tax_rate_table.iloc[city_tax_bracket_tier,
married_income_percentage_tax_column]).strip("%")) / 100)
city_tax_due += marginal_tax_due
return round(city_tax_due, 2)
# calculate effective tax rate from the classes/inheritance structure we have created
class TaxProfile:
def __init__(self, my_salary, spouse_salary, marital_status, state, city, federal_deductions, state_deductions,
city_deductions):
self.my_salary = my_salary
self.spouse_salary = spouse_salary
self.salary = my_salary + spouse_salary
self.marital_status = marital_status
self.state = state
self.city = city
self.federal_deductions = federal_deductions
self.state_deductions = state_deductions
self.city_deductions = city_deductions
def createTaxAnalysisWorkBook(self):
workbook = xlsxwriter.Workbook("./" + "Tax_Analysis_" + str(date_time) + ".xlsx")
workbook.close()
assumptions_table = [["salary", "{:,.2f}".format(self.salary)], ["marital_status", str(self.marital_status)],
["state", str(self.state)],
["city", str(self.city)],
["federal_deductions", "{:,.2f}".format(self.federal_deductions)],
["state_deductions", "{:,.2f}".format(self.state_deductions)],
["city_deductions", "{:,.2f}".format(self.city_deductions)]]
assumption_columns = ["Field", "Assumption"]
assumptions = | pd.DataFrame(assumptions_table, columns=assumption_columns) | pandas.DataFrame |
"""
This is the main file for the QA pipeline.
It utilizes question_understanding.py to extract type, concepts, and a query from a plaintext question.
It then utilizes information_retrieval.py to retrieve a list of PubMed articles pertaining to the query formed previously.
Finally it utilizes question_answering.py to generate an answer to the original question utilizing the information gathered in the previous two steps.
"""
import warnings
#warnings.simplefilter(action='ignore',category=UserWarning)
import json
import pandas as pd
import numpy as np
import torch
import torch.nn.functional as F
from transformers import BertTokenizer,BertForSequenceClassification,AdamW,BertConfig,get_linear_schedule_with_warmup
from lxml import etree as ET
import spacy
import scispacy
import en_core_sci_lg
from bs4 import BeautifulSoup as bs
import requests
import re
import os
import shutil
from tqdm import tqdm
import json
from whoosh import index
from whoosh.fields import Schema, TEXT, IDLIST, ID, NUMERIC
from whoosh.analysis import StemmingAnalyzer
from whoosh.qparser import QueryParser
import setup
import question_understanding
import information_retrieval
import question_answering
import PubmedA
if __name__ == "__main__":
# This ensures that all the packages are installed so that the system can work with the modules
data_folder = 'data_modules'
setup.setup_system(data_folder)
index_folder_name = 'index'
model_folder_name = 'model'
pubmed_official_index_name = 'pubmed_articles'
    # Prefer the GPU when one is available; machines without an NVIDIA CUDA-capable GPU fall back to the CPU.
spacy.prefer_gpu()
# initialize model
print("\033[95mInitializing model...\033[0m")
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertForSequenceClassification.from_pretrained(data_folder + os.path.sep + model_folder_name, cache_dir=None)
# load in BioBERT
print("\033[95mLoading BioBERT...\033[0m")
nlp = en_core_sci_lg.load()
# load index
index_var = 'full_index'
print("\033[95mLoading index...\033[0m")
# This is the schema for each query retrieved from
pubmed_article_ix = index.open_dir(data_folder + os.path.sep + index_folder_name + os.path.sep + index_var, indexname=pubmed_official_index_name)
qp = QueryParser("abstract_text", schema=Schema(
pmid=ID(stored=True),
title=TEXT(stored=True),
journal=TEXT(stored=True),
mesh_major=IDLIST(stored=True),
year=NUMERIC(stored=True),
abstract_text=TEXT(stored=True, analyzer=StemmingAnalyzer())))
batch_mode_answer = input("\033[95m Would you like to run batch mode? (y/n): \033[0m")
is_batch_mode = batch_mode_answer in ['Y','y','Yes','yes','Yep','yep','Yup','yup']
if is_batch_mode:
while(True):
# qu_input = "testing_datasets/input.csv"
# ir_input_generated = "tmp/ir/input/bioasq_qa.xml"
# ir_output_generated = "tmp/ir/output/bioasq_qa.xml"
# qa_output_generated_dir = "tmp/qa/"
# For evaluation
qu_input = "testing_datasets/evaluation_input.csv"
ir_input_generated = "tmp/ir/input/bioasq_qa_EVAL.xml"
ir_output_generated = "tmp/ir/output/bioasq_qa_EVAL.xml"
qa_output_generated_dir = "tmp/qa_EVAL/"
# User prompt
batch_options = """\033[95m
What part of the system do you want to test? (Any non-number input will Cancel)
0) Whole system
1) Question Understanding (QU)
2) Information Retrieval (IR)
3) Question Answering (QA)
4) QU + IR
5) IR + QA\033[0m
"""
            batch_options_dict = {"0":"Whole system", "1": "Question Understanding", "2": "Information Retrieval", "3": "Question Answering", "4": "QU + IR", "5": "IR + QA"}
result = input(batch_options)
if(result):
if result in batch_options_dict.keys():
print(f"\033[95m{batch_options_dict.get(result)} selected.\033[0m")
if (result == "0"):
test_dataframe = pd.read_csv(qu_input,sep=',',header=0)
                        question_understanding.ask_and_receive(test_dataframe,device,tokenizer,model,nlp,batch_mode=True, output_file=ir_input_generated)
information_retrieval.batch_search(input_file=ir_input_generated, output_file=ir_output_generated, indexer=pubmed_article_ix, parser=qp)
question_answering.run_batch_mode(input_file=ir_output_generated,output_dir=qa_output_generated_dir)
elif(result == "1"):
test_dataframe = pd.read_csv(qu_input,sep=',',header=0)
                        question_understanding.ask_and_receive(test_dataframe,device,tokenizer,model,nlp,batch_mode=True, output_file=ir_input_generated)
elif(result == "2"):
if os.path.exists(ir_input_generated):
information_retrieval.batch_search(input_file=ir_input_generated, output_file=ir_output_generated, indexer=pubmed_article_ix, parser=qp)
else:
print("\033[91mMake sure you run the QU module before running the IR module.\033[0m")
elif(result == "3"):
if os.path.exists(ir_output_generated):
question_answering.run_batch_mode(input_file=ir_output_generated,output_dir=qa_output_generated_dir)
else:
print("\033[91mMake sure you run both the QU module and the IR module before running the QA module.\033[0m")
elif(result == "4"):
test_dataframe = pd.read_csv(qu_input,sep=',',header=0)
                        question_understanding.ask_and_receive(test_dataframe,device,tokenizer,model,nlp,batch_mode=True, output_file=ir_input_generated)
information_retrieval.batch_search(input_file=ir_input_generated, output_file=ir_output_generated, indexer=pubmed_article_ix, parser=qp)
elif(result == "5"):
if os.path.exists(ir_input_generated):
information_retrieval.batch_search(input_file=ir_input_generated, output_file=ir_output_generated, indexer=pubmed_article_ix, parser=qp)
question_answering.run_batch_mode(input_file=ir_output_generated,output_dir=qa_output_generated_dir)
else:
print("\033[91mMake sure you run the QU module before running the IR module.\033[0m")
else:
print("\033[95mShutting down...\033[0m")
quit()
# If the user responds with anything not affirmative, send them to the live question answering
else:
n = 0
while(True):
user_question = input("\033[95m:: Please enter your question for the BioASQ QA system or \'quit\' ::\n\033[0m")
# handle end loop
if user_question == 'quit':
quit()
df = | pd.DataFrame({'ID':[n],'Question':user_question}) | pandas.DataFrame |
from datetime import datetime, timedelta
import pandas as pd
from pandas_datareader.exceptions import UnstableAPIWarning
from pandas_datareader.iex import IEX
# Data provided for free by IEX
# Data is furnished in compliance with the guidelines promulgated in the IEX
# API terms of service and manual
# See https://iextrading.com/api-exhibit-a/ for additional information
# and conditions of use
class DailySummaryReader(IEX):
"""
Daily statistics from IEX for a day or month
"""
def __init__(self, symbols=None, start=None, end=None, retry_count=3,
pause=0.1, session=None):
import warnings
warnings.warn('Daily statistics is not working due to issues with the '
'IEX API', UnstableAPIWarning)
self.curr_date = start
super(DailySummaryReader, self).__init__(symbols=symbols,
start=start, end=end,
retry_count=retry_count,
pause=pause, session=session)
@property
def service(self):
"""Service endpoint"""
return "stats/historical/daily"
def _get_params(self, symbols):
p = {}
if self.curr_date is not None:
p['date'] = self.curr_date.strftime('%Y%m%d')
return p
def read(self):
"""Unfortunately, IEX's API can only retrieve data one day or one month
at a time. Rather than specifying a date range, we will have to run
the read function for each date provided.
:return: DataFrame
"""
tlen = self.end - self.start
dfs = []
for date in (self.start + timedelta(n) for n in range(tlen.days)):
self.curr_date = date
tdf = super(DailySummaryReader, self).read()
dfs.append(tdf)
return pd.concat(dfs)
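# Illustrative usage sketch (the dates below are assumptions, not taken from
# pandas-datareader's documentation): read() issues one request per day in the
# half-open range [start, end), so this four-day window triggers four daily
# requests whose frames are concatenated.
def _daily_summary_usage_sketch():
    reader = DailySummaryReader(start=datetime(2017, 5, 1),
                                end=datetime(2017, 5, 5))
    return reader.read()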
class MonthlySummaryReader(IEX):
"""Monthly statistics from IEX"""
def __init__(self, symbols=None, start=None, end=None, retry_count=3,
pause=0.1, session=None):
self.curr_date = start
self.date_format = '%Y%m'
super(MonthlySummaryReader, self).__init__(symbols=symbols,
start=start, end=end,
retry_count=retry_count,
pause=pause,
session=session)
@property
def service(self):
"""Service endpoint"""
return "stats/historical"
def _get_params(self, symbols):
p = {}
if self.curr_date is not None:
p['date'] = self.curr_date.strftime(self.date_format)
return p
def read(self):
"""Unfortunately, IEX's API can only retrieve data one day or one month
at a time. Rather than specifying a date range, we will have to run
the read function for each date provided.
:return: DataFrame
"""
tlen = self.end - self.start
dfs = []
# Build list of all dates within the given range
lrange = [x for x in (self.start + timedelta(n)
for n in range(tlen.days))]
mrange = []
for dt in lrange:
if datetime(dt.year, dt.month, 1) not in mrange:
mrange.append(datetime(dt.year, dt.month, 1))
lrange = mrange
for date in lrange:
self.curr_date = date
tdf = super(MonthlySummaryReader, self).read()
# We may not return data if this was a weekend/holiday:
if not tdf.empty:
tdf['date'] = date.strftime(self.date_format)
dfs.append(tdf)
# We may not return any data if we failed to specify useful parameters:
return | pd.concat(dfs) | pandas.concat |
import json
import io
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import dash
from dash import html
from dash import dcc
import dash_bootstrap_components as dbc
import pandas as pd
import numpy as np
import plotly.express as px
from dash.dependencies import Output, Input, State
from datetime import datetime, timedelta
from server import app
import plotly.graph_objects as go
import plotly.express as px
from sqlalchemy import create_engine
from flask import send_file
import os
from joblib import Parallel, delayed
from dash.exceptions import PreventUpdate
# ----------------------------------------------------------------------------------------------------- Level-1 Figure 1 ----------------------------------------------------------------------------------------------------------------------
# Fetch the data for the first level-1 overview figure
def get_first_lev_first_fig_date(engine):
res_数据时间缺失及汇总 = pd.DataFrame(columns=['业务类型', '问题数', '总数', '问题数量占比'])
    # For each business type: [query counting rows with missing timestamps, query counting all rows]
bus_dic = {
# '患者基本信息': ['select count(distinct caseid) as num from overall where in_time is null or out_time is null','select count(distinct caseid) as num from overall'],
'入院时间': ['select count(distinct caseid) as num from overall where in_time is null ',
'select count(distinct caseid) as num from overall'],
'出院时间': ['select count(distinct caseid) as num from overall where out_time is null',
'select count(distinct caseid) as num from overall'],
'手术': ['select count(1) as num from oper2 where BEGINTIME is null or ENDTIME is null ','select count(1) as num from oper2 '],
'给药': ['select count(1) as num from ANTIBIOTICS where BEGINTIME is null or ENDTIME is null ','select count(1) as num from ANTIBIOTICS '],
'入出转': ['select count(1) as num from DEPARTMENT where BEGINTIME is null or ENDTIME is null ','select count(1) as num from DEPARTMENT '],
'菌检出': ['select count(1) as num from BACTERIA where REQUESTTIME is null ','select count(1) as num from BACTERIA '],
'体温': ['select count(1) as num from TEMPERATURE where RECORDDATE is null ','select count(1) as num from TEMPERATURE '],
'药敏': ['select count(1) as num from DRUGSUSCEPTIBILITY where REQUESTTIME is null or REPORTTIME is null ','select count(1) as num from DRUGSUSCEPTIBILITY '],
'检查': ['select count(1) as num from EXAM where EXAM_DATE is null ','select count(1) as num from EXAM '],
'生化': ['select count(1) as num from ROUTINE2 where REQUESTTIME is null or REPORTTIME is null ','select count(1) as num from ROUTINE2 '],
'三管': ['select count(1) as num from TREATMENT1 where BEGINTIME is null or ENDTIME is null ','select count(1) as num from TREATMENT1 '],
}
for bus in bus_dic:
try:
count_时间为空 = pd.read_sql(bus_dic[bus][0],con=engine)['num'][0]
count_总 = pd.read_sql(bus_dic[bus][1],con=engine)['num'][0]
res_数据时间缺失及汇总.loc[res_数据时间缺失及汇总.shape[0]] = [bus,count_时间为空,count_总,round(count_时间为空 / count_总, 4) * 100]
except:
res_数据时间缺失及汇总.loc[res_数据时间缺失及汇总.shape[0]] = [bus,-1,-1,-1]
            print('level-1 figure 1', bus)
return res_数据时间缺失及汇总
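# Minimal calling sketch (the connection URL is a placeholder, not a real
# deployment value): the helper only needs a SQLAlchemy engine and returns one
# row per business type with its missing-timestamp count, total count and
# missing percentage.
def _first_fig_data_usage_sketch():
    engine = create_engine("oracle+cx_oracle://user:password@host:1521/sid")
    return get_first_lev_first_fig_date(engine)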
# Update level-1 figure 1
@app.callback(
Output('first_level_first_fig','figure'),
Output('general_situation_first_level_first_fig_data','data'),
Input('general_situation_first_level_first_fig_data','data'),
Input("db_con_url", "data"),
Input("count_time", "data"),
# prevent_initial_call=True
)
def update_first_level_first_fig(general_situation_first_level_first_fig_data,db_con_url,count_time):
if db_con_url is None :
return dash.no_update
else:
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
engine = create_engine(db_con_url['db'])
if general_situation_first_level_first_fig_data is None:
general_situation_first_level_first_fig_data = {}
first_level_first_fig_data = get_first_lev_first_fig_date(engine)
general_situation_first_level_first_fig_data['first_level_first_fig_data'] = first_level_first_fig_data.to_json(orient='split', date_format='iso')
general_situation_first_level_first_fig_data['hosname'] = db_con_url['hosname']
general_situation_first_level_first_fig_data = json.dumps(general_situation_first_level_first_fig_data)
else:
general_situation_first_level_first_fig_data = json.loads(general_situation_first_level_first_fig_data)
if db_con_url['hosname'] != general_situation_first_level_first_fig_data['hosname']:
first_level_first_fig_data = get_first_lev_first_fig_date(engine)
general_situation_first_level_first_fig_data['first_level_first_fig_data'] = first_level_first_fig_data.to_json(orient='split',date_format='iso')
general_situation_first_level_first_fig_data['hosname'] = db_con_url['hosname']
general_situation_first_level_first_fig_data = json.dumps(general_situation_first_level_first_fig_data)
else:
first_level_first_fig_data = pd.read_json(general_situation_first_level_first_fig_data['first_level_first_fig_data'], orient='split')
general_situation_first_level_first_fig_data = dash.no_update
#
fig_概览一级_时间缺失 = make_subplots(specs=[[{"secondary_y": True}]])
res_数据时间缺失及汇总 = first_level_first_fig_data.sort_values(['问题数'], ascending=False)
        # Missing count per business type -- bar chart
fig_概览一级_时间缺失.add_trace(
go.Bar(x=res_数据时间缺失及汇总['业务类型'], y=res_数据时间缺失及汇总['问题数'], name="问题数量",
marker_color=px.colors.qualitative.Dark24, ),
secondary_y=False,
)
        # Missing-count percentage per business type -- line chart
fig_概览一级_时间缺失.add_trace(
go.Scatter(x=res_数据时间缺失及汇总['业务类型'], y=res_数据时间缺失及汇总['问题数量占比'], name="问题数量占比", ),
secondary_y=True,
)
        # Set the X-axis title
fig_概览一级_时间缺失.update_xaxes(tickangle=45,title_text="业务指标")
        # Set the Y-axis titles
fig_概览一级_时间缺失.update_yaxes(title_text="缺失数量", secondary_y=False)
fig_概览一级_时间缺失.update_yaxes(title_text="缺失占比(%)", secondary_y=True)
        # Use a horizontal legend and set its position
fig_概览一级_时间缺失.update_layout(
margin=dict(l=20, r=20, t=20, b=20),
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1
))
        # Set the figure margins
fig_概览一级_时间缺失.update_layout(margin=dict(l=20, r=20, t=20, b=20), )
return fig_概览一级_时间缺失,general_situation_first_level_first_fig_data
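# A stripped-down sketch of the caching pattern used in the callback above
# (the one-row frame is an invented example): the DataFrame travels through the
# dcc.Store as a JSON string via to_json(orient='split') and is restored with
# pd.read_json on later callback runs.
def _store_round_trip_sketch():
    df = pd.DataFrame({"业务类型": ["手术"], "问题数": [3]})
    payload = json.dumps({"hosname": "demo",
                          "data": df.to_json(orient="split", date_format="iso")})
    return pd.read_json(json.loads(payload)["data"], orient="split")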
# Download the detail data behind level-1 figure 1
@app.callback(
Output('first_level_first_fig_data_detail', 'data'),
Input('first_level_first_fig_data_detail_down','n_clicks'),
Input("db_con_url", "data"),
prevent_initial_call=True,
)
def download_first_level_first_fig_data_detail(n_clicks,db_con_url):
if db_con_url is None :
return dash.no_update
else:
if n_clicks is not None and n_clicks>0:
n_clicks = 0
db_con_url = json.loads(db_con_url)
engine = create_engine(db_con_url['db'])
bus_dic = {
'入院时间': 'select * from overall where in_time is null ',
'出院时间': 'select * from overall where out_time is null',
'手术': 'select * from oper2 where BEGINTIME is null or ENDTIME is null ',
'给药': 'select * from ANTIBIOTICS where BEGINTIME is null or ENDTIME is null ',
'入出转': 'select * from DEPARTMENT where BEGINTIME is null or ENDTIME is null ',
'菌检出': 'select * from BACTERIA where REQUESTTIME is null ',
'药敏': 'select * from DRUGSUSCEPTIBILITY where REQUESTTIME is null or REPORTTIME is null ',
'检查': 'select * from EXAM where EXAM_DATE is null',
'生化': 'select * from ROUTINE2 where REQUESTTIME is null or REPORTTIME is null ',
'三管': 'select * from TREATMENT1 where BEGINTIME is null or ENDTIME is null ',
}
output = io.BytesIO()
writer = pd.ExcelWriter(output, engine='xlsxwriter')
for key in bus_dic.keys():
try:
temp = pd.read_sql(bus_dic[key],con=engine)
if temp.shape[0]>0:
temp.to_excel(writer, sheet_name=key)
except:
error_df = | pd.DataFrame(['明细数据获取出错'],columns=[key]) | pandas.DataFrame |
# coding: utf-8
import numpy as np
import pandas as pd
import argparse, os.path, glob
def get_loss_diff(df_full, df_local):
df_full = df_full.sort_values('seq_and_time_ixs')
for context_length, sub_df_local in df_local.groupby('context_length'):
sub_df_local = sub_df_local.sort_values('seq_and_time_ixs')
df_local.loc[sub_df_local.index, 'loss_diff'] = sub_df_local.loss - df_full.loss.values
df_local.loc[:,'context_length'] = df_local.context_length.astype(int)
return df_local
def get_statistically_effective_context_length(
df,
group_name,
threshold=0.01,
seed=111,
num_bs_samples=10000
):
update_message = """
UPDATE on Jun 25 2021:
    To make it parallel to the Markovian order, SECL is now defined as
"the MINIMUM length of truncated context where its difference from the full context is BELOW the threshold."
Previously, the definition was:
"the MAXIMUM length of truncated context where its difference from the full context is ABOVE the threshold."
In short, this revision makes the new SECL = old SECL + 1.
"""
print(update_message)
ecls = {}
for gp, sub_df in df.groupby(group_name):
print('{}:{}'.format(group_name, gp))
# UPDATE on Jun 25 2021:
        # To make it parallel to the Markovian order, SECL is now defined as "the minimum length of truncated context whose difference from the full context falls below the threshold" (see update_message above).
# previous_context_length = 0
# previous_perplex_gain = None
ecl_detected = False
for context_length, subsub_df in sub_df.groupby('context_length'):
random_state = np.random.RandomState(seed)
samples = [subsub_df.sample(frac=1.0, replace=True, random_state=random_state).loss_diff.mean()
for iter_ix in range(num_bs_samples)
]
perplex_gain = np.exp(np.percentile(samples,5.0))
if perplex_gain < 1.0+threshold:
print('N={}'.format(subsub_df.shape[0]))
print('Statistically Effective Context Length (SECL) is {}'.format(context_length))
print('Perplexity improvement at SECL: {}'.format(perplex_gain))
# print('Perplexity improvement at {}: {}'.format(context_length, perplex_gain))
ecls[gp] = context_length
ecl_detected = True
break
# else:
# previous_context_length = context_length
# previous_perplex_gain = perplex_gain
if not ecl_detected:
print('N={}'.format(subsub_df.shape[0]))
print('Statistically Effective Context Length (SECL) is >{}'.format(context_length))
print('Perplexity improvement at SECL: {}'.format(perplex_gain))
print('Achieved the maximum tested.')
ecls[gp] = context_length
ecls = | pd.Series(ecls, name='SECL') | pandas.Series |
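# A self-contained sketch of the statistical test above (the loss differences
# are simulated, not real model output): bootstrapped means of the per-token
# loss differences are turned into a perplexity gain via exp(5th percentile)
# and compared against the 1 + threshold criterion.
def _perplexity_gain_sketch(threshold=0.01, num_bs_samples=1000, seed=111):
    rng = np.random.RandomState(seed)
    loss_diff = rng.normal(loc=0.002, scale=0.05, size=500)
    samples = [rng.choice(loss_diff, size=loss_diff.size, replace=True).mean()
               for _ in range(num_bs_samples)]
    perplex_gain = np.exp(np.percentile(samples, 5.0))
    return perplex_gain < 1.0 + threshold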
import os
from datapackage import Package
import pandas as pd
import json
import matplotlib.pyplot as plt
from oemof.tools.economics import annuity
path = os.path.join(os.getcwd(), "results")
scenario_year = 2050
wacc = 0.05
technologies = pd.DataFrame(
Package(
"https://raw.githubusercontent.com/ZNES-datapackages/"
"angus-input-data/master/technology/datapackage.json"
)
.get_resource("technology")
.read(keyed=True)
).set_index(["year", "parameter", "carrier", "tech"])
heat = pd.DataFrame(
Package(
"https://raw.githubusercontent.com/ZNES-datapackages/"
"angus-input-data/master/technology/datapackage.json"
)
.get_resource("heat")
.read(keyed=True)
).set_index(["year", "parameter", "carrier", "tech"])
technologies = | pd.concat([technologies, heat]) | pandas.concat |
from __future__ import division #brings in Python 3.0 mixed type calculations
import numpy as np
import os
import pandas as pd
import sys
#find parent directory and import model
parentddir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
sys.path.append(parentddir)
from base.uber_model import UberModel, ModelSharedInputs
class BeerexInputs(ModelSharedInputs):
"""
Input class for Beerex
"""
def __init__(self):
"""Class representing the inputs for Beerex"""
super(BeerexInputs, self).__init__()
#self.incorporation_depth = pd.Series([], dtype="float")
self.application_rate = pd.Series([], dtype="float")
self.application_method = pd.Series([], dtype="object")
self.crop_type = pd.Series([], dtype="object")
# self.application_units = pd.Series([], dtype="object")
self.empirical_residue = pd.Series([], dtype="object")
self.empirical_pollen = pd.Series([], dtype="float")
self.empirical_nectar = pd.Series([], dtype="float")
self.empirical_jelly = pd.Series([], dtype="float")
self.adult_contact_ld50 = pd.Series([], dtype="float")
self.adult_oral_ld50 = pd.Series([], dtype="float")
self.adult_oral_noael = pd.Series([], dtype="float")
self.larval_ld50 = pd.Series([], dtype="float")
self.larval_noael = pd.Series([], dtype="float")
self.log_kow = pd.Series([], dtype="float")
self.koc = pd.Series([], dtype="float")
self.mass_tree_vegetation = pd.Series([], dtype="float")
self.lw1_jelly = pd.Series([], dtype="float")
self.lw2_jelly = pd.Series([], dtype="float")
self.lw3_jelly = pd.Series([], dtype="float")
self.lw4_nectar = pd.Series([], dtype="float")
self.lw4_pollen = pd.Series([], dtype="float")
self.lw5_nectar = pd.Series([], dtype="float")
self.lw5_pollen = pd.Series([], dtype="float")
self.ld6_nectar = pd.Series([], dtype="float")
self.ld6_pollen = pd.Series([], dtype="float")
self.lq1_jelly = pd.Series([], dtype="float")
self.lq2_jelly = pd.Series([], dtype="float")
self.lq3_jelly = pd.Series([], dtype="float")
self.lq4_jelly = pd.Series([], dtype="float")
self.aw_cell_nectar = pd.Series([], dtype="float")
self.aw_cell_pollen = pd.Series([], dtype="float")
self.aw_brood_nectar = pd.Series([], dtype="float")
self.aw_brood_pollen = pd.Series([], dtype="float")
self.aw_comb_nectar = pd.Series([], dtype="float")
self.aw_comb_pollen = pd.Series([], dtype="float")
self.aw_fpollen_nectar = pd.Series([], dtype="float")
self.aw_fpollen_pollen = pd.Series([], dtype="float")
self.aw_fnectar_nectar = pd.Series([], dtype="float")
self.aw_fnectar_pollen = pd.Series([], dtype="float")
self.aw_winter_nectar = pd.Series([], dtype="float")
self.aw_winter_pollen = pd.Series([], dtype="float")
self.ad_nectar = pd.Series([], dtype="float")
self.ad_pollen = pd.Series([], dtype="float")
self.aq_jelly = pd.Series([], dtype="float")
class BeerexOutputs(object):
"""
Output class for Beerex
"""
def __init__(self):
"""Class representing the outputs for Beerex"""
super(BeerexOutputs, self).__init__()
self.out_eec_spray = pd.Series(name="out_eec_spray", dtype="float")
self.out_eec_soil = pd.Series(name="out_eec_soil", dtype="float")
self.out_eec_seed = pd.Series(name="out_eec_seed", dtype="float")
self.out_eec_tree = pd.Series(name="out_eec_tree", dtype="float")
self.out_eec = pd.Series(name="out_eec", dtype="float")
self.out_lw1_total_dose = pd.Series(name="out_lw1_total_dose", dtype="float")
self.out_lw2_total_dose = pd.Series(name="out_lw2_total_dose", dtype="float")
self.out_lw3_total_dose = pd.Series(name="out_lw3_total_dose", dtype="float")
self.out_lw4_total_dose = pd.Series(name="out_lw4_total_dose", dtype="float")
self.out_lw5_total_dose = pd.Series(name="out_lw5_total_dose", dtype="float")
self.out_ld6_total_dose = pd.Series(name="out_ld6_total_dose", dtype="float")
self.out_lq1_total_dose = pd.Series(name="out_lq1_total_dose", dtype="float")
self.out_lq2_total_dose = pd.Series(name="out_lq2_total_dose", dtype="float")
self.out_lq3_total_dose = pd.Series(name="out_lq3_total_dose", dtype="float")
self.out_lq4_total_dose = pd.Series(name="out_lq4_total_dose", dtype="float")
self.out_aw_cell_total_dose = pd.Series(name="out_aw_cell_total_dose", dtype="float")
self.out_aw_brood_total_dose = pd.Series(name="out_aw_brood_total_dose", dtype="float")
self.out_aw_comb_total_dose = pd.Series(name="out_aw_comb_total_dose", dtype="float")
self.out_aw_pollen_total_dose = pd.Series(name="out_aw_pollen_total_dose", dtype="float")
self.out_aw_nectar_total_dose = pd.Series(name="out_aw_nectar_total_dose", dtype="float")
self.out_aw_winter_total_dose = pd.Series(name="out_aw_winter_total_dose", dtype="float")
self.out_ad_total_dose = pd.Series(name="out_ad_total_dose", dtype="float")
self.out_aq_total_dose = pd.Series(name="out_aq_total_dose", dtype="float")
self.out_lw1_acute_rq = | pd.Series(name="out_lw1_acute_rq", dtype="float") | pandas.Series |
# -*- coding: utf-8 -*-
"""
Script to compare output of the mass_pattern_finder with a reference file
(C) <NAME>, ETH Zurich
February 2022
"""
from collections import defaultdict
import sys
import os
import argparse
import pandas as pd
import matplotlib.pyplot as plt
import cmocean
from numpy import log10
from math import ceil, floor
import numpy as np
from copy import deepcopy
def sum_peaks(peaks):
print(sum([len(peaks[rt]) for rt in peaks]))
def obtain_peaks_from_file(file):
#Extract the peaks from the file
txt = open(file, 'r').read()
timepoints = txt.split('Found matching pattern at ')[1:]
peaks_from_file = dict()
for timepoint in timepoints:
masses = timepoint.split('\n\tMass: ')
time = float(masses.pop(0).replace(' min:',''))
peaks_from_file[time] = list()
for mass_dat in masses:
mass = float(mass_dat.split('\t')[0])
intensity = float(mass_dat.split('\t')[1].split(': ')[1])
formulas = [formula.replace('\n', '') for formula in mass_dat.split('\t\t')[1:]]
peaks_from_file[time].append([mass, intensity, formulas])
return peaks_from_file
def parse_files(files, args, sample):
# Pare the input files and obtain the peaks. Cluster them if necessary
peaks = defaultdict(list)
for file in files:
peaks[file] = obtain_peaks_from_file(file)
if len(peaks[file]) == 0:
sys.stdout.write(f"Did not find any peaks in {file}.\n")
files = [file for file in files if len(peaks[file]) > 0 ]
peaks_in_all_files = peaks[files[0]]
# If there are more files, make sure the peaks are in all the sample files
for file_iterator, file in enumerate(files):
if file_iterator == 0:
sys.stdout.write(f"Found {len(files)} {''.join(['input' if sample else 'reference'])} files:\n\t" + '\n\t'.join([file for file in files]) + '\n')
for reference_file in files[file_iterator+1:]:
sys.stdout.write(f"Finding {''.join(['unique' if sample else 'shared'])} peaks between {os.path.basename(file)} and {os.path.basename(reference_file)}.\n")
if file_iterator == 0:
unique_peaks, peaks_in_all_files = compare_peaks(peaks[file], peaks[reference_file], args, sample)
else:
unique_peaks, peaks_in_all_files = compare_peaks(peaks_in_all_files, peaks[reference_file], args, sample)
if not sample:
for rt in unique_peaks:
peaks_in_all_files[rt].extend(unique_peaks[rt])
return peaks_in_all_files
def compare_input_mass_to_reference_masses(input_mass, reference_masses, args, mode):
# Small function to see if an input mass is present in a list of reference masses
result = [None, None]
for reference_mass in reference_masses:
if abs(input_mass[0]-reference_mass[0]) < args.mass_tolerance*input_mass[0]:
if mode == 'compare':
result[0] = input_mass
else:
result[0] = [round((input_mass[0] + reference_mass[0])/2, 4), round((input_mass[1] + reference_mass[1])/2, 1), input_mass[2]]
break
return result
# If no matches are found, the peak is unique
if input_mass[1] > args.min_intensity:
result[1] = input_mass
return result
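# A tiny worked example of the relative tolerance used above (numbers are
# invented): with mass_tolerance = 5e-6 (i.e. 5 ppm), an input mass of
# 500.0000 matches a reference mass of 500.0020 because
# |500.0000 - 500.0020| = 0.0020 < 5e-6 * 500.0000 = 0.0025.
def _mass_tolerance_sketch(input_mass=500.0000, reference_mass=500.0020,
                           mass_tolerance=5e-6):
    return abs(input_mass - reference_mass) < mass_tolerance * input_mass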
def compare_peaks(input_peaks, reference_peaks, args, mode):
# Compare the peaks and return a dictionary of unique and mutual peaks
# For the mutual peaks, the average of the masses, retention times and intensities is reported
unique_peaks = defaultdict(list)
mutual_peaks = defaultdict(list)
num_reference_peaks = len(reference_peaks)
rt_ref_peaks = [rt for rt in reference_peaks]
ref_iterator = 0
total_input_peaks = len(input_peaks)
for rt_index, rt in enumerate(input_peaks):
#if rt > 6.46:
# break
# Check for every elution time...
if mode == 'compare':
sys.stdout.write(f"\rProgress: {round(rt_index/total_input_peaks*100, 2)}%")
# Check if the peaks at all elution times in the sample match to a reference
if ref_iterator < len(reference_peaks)-1:
# If the reference iterator for the reference is still much earlier, first catch up
while rt_ref_peaks[ref_iterator]-rt < -1*args.time_tolerance and ref_iterator < len(rt_ref_peaks)-1:
ref_iterator += 1
if rt_ref_peaks[ref_iterator]-rt > args.time_tolerance:
unique_peaks[rt] = input_peaks[rt]
continue
if rt_ref_peaks[-1] < rt:
unique_peaks[rt] = input_peaks[rt]
continue
elif abs(rt_ref_peaks[ref_iterator]-rt) <= args.time_tolerance and ref_iterator < len(rt_ref_peaks):
input_masses = input_peaks[rt]
# If the rt range of interest is in the reference, look in the neighboring rts for all masses
reference_masses = reference_peaks[rt_ref_peaks[ref_iterator]]
ref_mass_iterator = ref_iterator
while rt_ref_peaks[ref_mass_iterator]-rt < 1*args.time_tolerance and ref_mass_iterator < len(rt_ref_peaks)-1:
reference_masses.extend(reference_peaks[rt_ref_peaks[ref_mass_iterator]])
ref_mass_iterator += 1
# And then compare each input_mass to the masses in the surrounding reference spectra
for input_mass in input_masses:
mutual_peak, unique_peak = compare_input_mass_to_reference_masses(input_mass, reference_masses, args, mode)
if mutual_peak:
mutual_peaks[round(rt, 2)].append(mutual_peak)
elif unique_peak:
unique_peaks[round(rt, 2)].append(unique_peak)
sys.stdout.write('\n')
return unique_peaks, mutual_peaks
def filter_peaks_for_identical_masses(input_peaks, args):
# Combine hits that have masses within the mass tolerance
# First filter each spectrum for identical peaks
filtered_peaks = defaultdict(list)
for rt in input_peaks:
peaks = sorted(input_peaks[rt], key=lambda peak: peak[0])
peak_mass, peak_intensity, peak_formula = [0, 0, '']
start_mass = 0
peak_iterator = 0
while peak_iterator < len(peaks)-1:
mass, intensity, formula = peaks[peak_iterator]
if mass-start_mass <= 2*args.mass_tolerance*start_mass and intensity > peak_intensity:
peak_mass = mass
peak_intensity = intensity
peak_formula = formula
if mass-start_mass > 2*args.mass_tolerance*start_mass:
if peak_iterator > 0:
filtered_peaks[rt].append([peak_mass, peak_intensity, peak_formula])
start_mass = mass
peak_mass = mass
peak_intensity = intensity
peak_formula = formula
peak_iterator += 1
rts = [rt for rt in filtered_peaks]
# Make a copy
centered_peaks = deepcopy(filtered_peaks)
for rt_index, rt in enumerate(filtered_peaks):
peaks = filtered_peaks[rt]
for mass, intensity, formula in peaks:
# Check for each peak in the neighborhing retention times
rt_iterator = 1
while rt_index+rt_iterator < len(rts):
next_rt = rts[rt_index+rt_iterator]
if next_rt-rt > args.time_tolerance:
break
next_peaks = filtered_peaks[next_rt]
for next_mass, next_intensity, next_formula in next_peaks:
if next_mass - mass < -1*args.mass_tolerance*mass:
continue
elif next_mass - mass > args.mass_tolerance*mass:
break
elif intensity >= next_intensity:
try:
centered_peaks[next_rt].remove([next_mass, next_intensity, next_formula])
except ValueError:
pass
elif next_intensity > intensity:
try:
centered_peaks[rt].remove([mass, intensity, formula])
except ValueError:
pass
rt_iterator += 1
return centered_peaks
def plot_results_in_2D(unique_peaks, plot_filename, time_range, mass_range, args):
# Plot the analyzed spectra in a single graph
plot_list = list()
for rt in unique_peaks:
spectrum = unique_peaks[rt]
for peak in spectrum:
plot_list.append([peak[0], rt, log10(peak[1])])
plot_list = | pd.DataFrame(plot_list, columns=['mass', 'time', 'intensity']) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 9 21:23:40 2019
@authors: <EMAIL>
Last modified: 2019-11-24
------------------------------------------------------
** Semantic Search Analysis: Maintain Match Files **
------------------------------------------------------
Update things like removing dupes that sneak in over time, punctuation, resorting...
"""
#%%
# ============================================
# 1. Start-up / What to put into place, where
# ============================================
'''
File locations, etc.
'''
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.pyplot import pie, axis, show
import matplotlib.ticker as mtick # used for example in 100-percent bars chart
import numpy as np
import os
import re
import string
import requests
import json
import lxml.html as lh
from lxml.html import fromstring
# Set working directory and directories for read/write
home_folder = os.path.expanduser('~')
os.chdir(home_folder + '/Projects/classifysearches')
dataMatchFiles = 'data/matchFiles/' # Permanent helper files; both reading and writing required
#%%
# ========================================================================
# To update SiteSpecificMatches.xlsx, such as punctuation, removing dupes
# ========================================================================
SiteSpecificMatches = pd.read_excel('data/matchFiles/SiteSpecificMatches.xlsx')
# Replace hyphen with space because the below would replace with nothing
SiteSpecificMatches['AdjustedQueryTerm'] = SiteSpecificMatches['AdjustedQueryTerm'].str.replace('-', ' ')
# Remove https:// if used
SiteSpecificMatches['AdjustedQueryTerm'] = SiteSpecificMatches['AdjustedQueryTerm'].str.replace('http://', '')
SiteSpecificMatches['AdjustedQueryTerm'] = SiteSpecificMatches['AdjustedQueryTerm'].str.replace('https://', '')
# Drop nulls
SiteSpecificMatches = SiteSpecificMatches.dropna(subset=['AdjustedQueryTerm'])
# Remove all chars except a-zA-Z0-9 and leave foreign chars alone
SiteSpecificMatches['AdjustedQueryTerm'] = SiteSpecificMatches['AdjustedQueryTerm'].str.replace(r'[^\w\s]+', '')
# Removing punct may mean that some entries will be duplicates
SiteSpecificMatches = SiteSpecificMatches.drop_duplicates(subset=['AdjustedQueryTerm'])
# Sort for easier editing
SiteSpecificMatches = SiteSpecificMatches.sort_values(by=['PreferredTerm', 'AdjustedQueryTerm'], ascending=[True, True])
# Write out
writer = pd.ExcelWriter('data/matchFiles/SiteSpecificMatches.xlsx')
SiteSpecificMatches.to_excel(writer,'SiteSpecificMatches', index=False)
# df2.to_excel(writer,'Sheet2')
writer.save()
#%%
# ================================================================
# To update PastMatches.xlsx, such as punctuation, removing dupes
# ================================================================
PastMatches = pd.read_excel('data/matchFiles/PastMatches.xlsx')
# Replace hyphen with space because the below would replace with nothing
PastMatches['AdjustedQueryTerm'] = PastMatches['AdjustedQueryTerm'].str.replace('-', ' ')
# Remove https:// if used
PastMatches['AdjustedQueryTerm'] = PastMatches['AdjustedQueryTerm'].str.replace('http://', '')
PastMatches['AdjustedQueryTerm'] = PastMatches['AdjustedQueryTerm'].str.replace('https://', '')
# Drop nulls
PastMatches = PastMatches.dropna(subset=['AdjustedQueryTerm'])
# Remove all chars except a-zA-Z0-9 and leave foreign chars alone
PastMatches['AdjustedQueryTerm'] = PastMatches['AdjustedQueryTerm'].str.replace(r'[^\w\s]+', '')
# Removing punct may mean that some entries will be duplicates
PastMatches = PastMatches.drop_duplicates(subset=['AdjustedQueryTerm'])
# Merge PastMatches with SiteSpecificMatches
PastMatches = pd.merge(PastMatches, SiteSpecificMatches, indicator=True, how='outer')
# Drop rows of all dupes in AdjustedQuery col
PastMatches = PastMatches.drop_duplicates(subset=['AdjustedQueryTerm'], keep=False)
# Reduce to only the rows that came from PastMatches
PastMatches = PastMatches[PastMatches._merge.str.contains("left_only") == True]
# Remove unneeded col
PastMatches.drop('_merge', axis=1, inplace=True)
# Sort for easier editing
PastMatches = PastMatches.sort_values(by=['PreferredTerm', 'AdjustedQueryTerm'], ascending=[True, True])
# Write out
writer = pd.ExcelWriter('data/matchFiles/PastMatches.xlsx')
PastMatches.to_excel(writer,'PastMatches', index=False)
# df2.to_excel(writer,'Sheet2')
writer.save()
#%%
# ============================================================
# To update UmlsMesh.csv, such as punctuation, removing dupes
# ============================================================
UmlsMesh = | pd.read_csv('data/matchFiles/UmlsMesh.csv', sep='|') | pandas.read_csv |
# region Includes
import pandas as pd
from utils import *
from models.index import model
from preprocess.index import pre_process
import joblib
# endregion
p("READING THE DATA... ")
# region Reading Data
train = pd.read_csv('heart_train.csv')
test = pd.read_csv('heart_test.csv')
y_test = pd.read_csv('sample.csv')
del train["index"]
del test["Index"]
del y_test["Index"]
train.dropna(inplace=True)
desc = train.describe()
p(desc)
# endregion
delim()
p("COLUMN SELECTION...")
# region column selection
x_train = train.loc[:, train.columns != "target"]
y_train = train.loc[:, train.columns == "target"]
sample_size = 2
print(f"""
Final Training data:
Shape:
x_train: {x_train.shape}
y_train: {y_train.shape}
Sample:
x_train: \n{x_train[:sample_size]}
y_train: \n{y_train[:sample_size]}
""")
# endregion
delim()
p("PRE-PROCESSING...")
# region pre-processing
x_train, y_test, y_train = pre_process(x_train, test, y_train)
# endregion
delim()
p("MODEL TRAINING...")
# region models
mod = 'svm'
clf = model(x_train, y_train, mod)
# endregion
delim()
p("Saving model...")
# region export
filename = f'{mod}_model.pkl'
joblib.dump(clf, './'+filename)
# endregion
delim()
p('Loading model')
# region loading model
loaded_model = joblib.load('./'+filename)
# endregion
delim()
p('Predicting and saving results to CSV...')
# region prediction
pr = loaded_model.predict(test)
f = {'Index': [i for i in range(243, 304)], "target": pr}
df = | pd.DataFrame(f) | pandas.DataFrame |
#!/usr/bin/env python
from __future__ import print_function
import warnings
import pandas as pd
from tabulate import tabulate
from matplotlib import pyplot as plt
import matplotlib
import numpy as np
import cPickle
######################################
warnings.filterwarnings('ignore')
pd.options.display.max_columns = 100
matplotlib.style.use('ggplot')
pd.options.display.max_rows = 100
######################################
train = pd.read_csv('../misc/data/train.csv')
test = pd.read_csv('../misc/data/test.csv')
# Prints the head of data prettily :)
# print(tabulate(train.head(), headers='keys', tablefmt='psql'))
# Describes the data stats
# print(tabulate(train.describe(), headers='keys', tablefmt='psql'))
# Imputing 'Age' column with median values
train['Age'].fillna(train['Age'].median(), inplace=True)
surv_sex = train[train['Survived'] == 1]['Sex'].value_counts()
dead_sex = train[train['Survived'] == 0]['Sex'].value_counts()
# Create graph for SurvivalRate w.r.t Gender
# df = pd.DataFrame([surv_sex, dead_sex])
# df.index = ['Survived', 'Dead']
# df.plot(kind='bar', stacked=True, figsize=(15, 8))
# plt.show()
surv_age = train[train['Survived'] == 1]['Age']
dead_age = train[train['Survived'] == 0]['Age']
# In order to tabulate a 1D array,
# reshape the array into 2D array as
# tabulate only allows 2D arrays as input
# surv_age = np.reshape(surv_age, (-1, 1))
# print(tabulate(surv_age[:20, :], headers='keys', tablefmt='psql'))
# Create a graph for SurvivalRate w.r.t Age
# plt.hist([surv_age, dead_age], stacked=True, color=['g', 'r'], bins=30, label=['Survived', 'Dead'])
# plt.xlabel('Age')
# plt.ylabel('Number of Passengers')
# plt.legend()
# plt.show()
surv_fare = train[train['Survived'] == 1]['Fare']
dead_fare = train[train['Survived'] == 0]['Fare']
# Create a graph for SurvivalRate w.r.t Fare
# plt.hist([surv_fare, dead_fare], stacked=True, color=['g', 'r'], bins=30, label=['Survived', 'Dead'])
# plt.xlabel('Fare')
# plt.ylabel('Number of Passengers')
# plt.legend()
# plt.show()
# Graph
# plt.figure(figsize=(15, 8))
# ax = plt.subplot()
# ax.scatter(surv_age, surv_fare, c='green', s=40)
# ax.scatter(dead_age, dead_fare, c='red', s=40)
# Graph
# ax.set_xlabel('Age')
# ax.set_ylabel('Fare')
# ax.legend(('survived', 'dead'), scatterpoints=1, loc='upper right', fontsize=15)
# plt.show()
# Graph
# ax = plt.subplot()
# ax.set_ylabel('Average Fare')
# train.groupby('Pclass').mean()['Fare'].plot(kind='bar', figsize=(15, 8), ax=ax)
# plt.show()
surv_embark = train[train['Survived'] == 1]['Embarked'].value_counts()
dead_embark = train[train['Survived'] == 0]['Embarked'].value_counts()
# Create a graph for SurvivalRate w.r.t EmbarkedPosition
# df = pd.DataFrame([surv_embark, dead_embark])
# df.index = ['Survived', 'Dead']
# df.plot(kind='bar', stacked=True, figsize=(15, 8))
# plt.show()
def status(feature):
print('processing', feature, ': OK')
# Feature Engineering
def getCombinedData():
test = pd.read_csv('../misc/data/test.csv')
train = pd.read_csv('../misc/data/train.csv')
# Extracting, then removing targets from training data
targets = train.Survived
train.drop('Survived', 1, inplace=True)
# merging train and test data for feature engineering
combined = train.append(test)
combined.reset_index(inplace=True)
combined.drop('index', inplace=True, axis=1)
return combined
combined = getCombinedData()
# pretty-print combined data
# print(combined.shape)
# print(tabulate(combined.describe(), headers='keys', tablefmt='psql'))
# print(tabulate(combined[:100][:], headers='keys', tablefmt='psql'))
def getTitles():
global combined
# extract title from each name
combined['Title'] = combined['Name'].map(lambda name:name.split(',')[1].split('.')[0].strip())
# mapping titles
Title_Dictionary = {
"Capt": "Officer",
"Col": "Officer",
"Major": "Officer",
"Jonkheer": "Royalty",
"Don": "Royalty",
"Sir" : "Royalty",
"Dr": "Officer",
"Rev": "Officer",
"the Countess": "Royalty",
"Dona": "Royalty",
"Mme": "Mrs",
"Mlle": "Miss",
"Ms": "Mrs",
"Mr" : "Mr",
"Mrs" : "Mrs",
"Miss" : "Miss",
"Master" : "Master",
"Lady" : "Royalty"
}
# mapping title to dictionary_val
combined['Title'] = combined.Title.map(Title_Dictionary)
getTitles()
# pretty-print combined data
# print(combined.shape)
# print(tabulate(combined.describe(), headers='keys', tablefmt='psql'))
# print(tabulate(combined[:100][:], headers='keys', tablefmt='psql'))
# imputing 'Age' values according to the section the person belongs
# instead of taking median of values
# in order to understand the reason for this method,
# run the following commands :
#####################################################################
# features = ['Sex', 'Pclass', 'Title']
# grouped = combined.groupby(features)
# print(tabulate(grouped.median(), headers='keys', tablefmt='psql'))
#####################################################################
# notice that different sections of people [differentiated by `features`]
# have different medians of age
def processAge():
global combined
def fillAges(row):
if row['Sex']=='female' and row['Pclass'] == 1:
if row['Title'] == 'Miss':
return 30
elif row['Title'] == 'Mrs':
return 45
elif row['Title'] == 'Officer':
return 49
elif row['Title'] == 'Royalty':
return 39
elif row['Sex']=='female' and row['Pclass'] == 2:
if row['Title'] == 'Miss':
return 20
elif row['Title'] == 'Mrs':
return 30
elif row['Sex']=='female' and row['Pclass'] == 3:
if row['Title'] == 'Miss':
return 18
elif row['Title'] == 'Mrs':
return 31
elif row['Sex']=='male' and row['Pclass'] == 1:
if row['Title'] == 'Master':
return 6
elif row['Title'] == 'Mr':
return 41.5
elif row['Title'] == 'Officer':
return 52
elif row['Title'] == 'Royalty':
return 40
elif row['Sex']=='male' and row['Pclass'] == 2:
if row['Title'] == 'Master':
return 2
elif row['Title'] == 'Mr':
return 30
elif row['Title'] == 'Officer':
return 41.5
elif row['Sex']=='male' and row['Pclass'] == 3:
if row['Title'] == 'Master':
return 6
elif row['Title'] == 'Mr':
return 26
combined.Age = combined.apply(lambda r: fillAges(r) if np.isnan(r['Age']) else r['Age'], axis=1)
status('age')
processAge()
# print(combined.info())
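# An equivalent, more compact imputation sketch (kept separate so the explicit
# fillAges() lookup table above stays as written): the same idea can be
# expressed by computing the group medians directly from the data with a
# groupby/transform and filling the missing ages from them.
def _impute_age_by_group_sketch(df):
    group_medians = df.groupby(['Sex', 'Pclass', 'Title'])['Age'].transform('median')
    df['Age'] = df['Age'].fillna(group_medians)
    return df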
def processNames():
global combined
# clean-up of `Name` variable
combined.drop('Name', axis=1, inplace=True)
titles_dummies = pd.get_dummies(combined['Title'], prefix='Title')
combined = pd.concat([combined, titles_dummies], axis=1)
combined.drop('Title', axis=1, inplace=True)
status('names')
processNames()
# print(tabulate(combined.head(), headers='keys', tablefmt='psql'))
def processFares():
global combined
combined.Fare.fillna(combined.Fare.mean(), inplace=True)
status('fare')
processFares()
def processEmbarked():
global combined
# two missing embarked values - filling them with the most frequent one (S)
combined.Embarked.fillna('S',inplace=True)
# dummy encoding
embarked_dummies = pd.get_dummies(combined['Embarked'],prefix='Embarked')
combined = pd.concat([combined,embarked_dummies],axis=1)
combined.drop('Embarked',axis=1,inplace=True)
status('embarked')
processEmbarked()
def processCabin():
global combined
# replacing missing cabins with U (for Uknown)
combined.Cabin.fillna('U',inplace=True)
# mapping each Cabin value with the cabin letter
combined['Cabin'] = combined['Cabin'].map(lambda c : c[0])
# dummy encoding ...
cabin_dummies = pd.get_dummies(combined['Cabin'],prefix='Cabin')
combined = pd.concat([combined,cabin_dummies],axis=1)
combined.drop('Cabin',axis=1,inplace=True)
status('cabin')
processCabin()
# print(combined.info())
def processSex():
global combined
# mapping string values to numerical one
combined['Sex'] = combined['Sex'].map({'male':1,'female':0})
status('sex')
# creates a 2d matrix out of 2 lists of same shape
# pclass_sample = np.column_stack((combined['Pclass'][:50], combined['Sex'][:50]))
# print(tabulate(pclass_sample, headers='keys', tablefmt='psql'))
processSex()
def processPclass():
global combined
pclass_dummies = pd.get_dummies(combined['Pclass'],prefix="Pclass")
combined = | pd.concat([combined,pclass_dummies],axis=1) | pandas.concat |
# -*- coding: utf-8 -*-
import subprocess
import json
import os
import io
from multiprocessing import Pool
import multiprocessing
import multiprocessing.pool
from operator import itemgetter
import random
import string
import pickle
import copy
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import colors
import pysam
import mip_classes as mod
import pandas as pd
from pandas.errors import MergeError
import gzip
from primer3 import calcHeterodimerTm
import primer3
import traceback
from msa_to_vcf import msa_to_vcf as msa_to_vcf
import itertools
import sys
import allel
from Bio import SeqIO
print("functions reloading")
# backbone dictionary
mip_backbones = {
"hybrid_bb": "AGATCGGAAGAGCACACGTGACTCGCCAAGCTGAAGNNNNNNNNNNNN",
"hybrid_split": "NNNNAGATCGGAAGAGCACACGTGACTCGCCAAGCTGAAGNNNNNNNNNN",
"hybrid_split_hp": "AGATCGGAAGAGCACACGTGACTCGCCAAGCTGAAGNNNNNNNNNN",
"gc_bb": "GCAGATCGGAAGAGCACACCTCGCCAAGCTTTCGGCNNNNNNNNNNNN",
"slx_bb": "CTTCAGCTTCCCGATCCGACGGTAGTGTNNNNNNNNNNNN"
}
"""
# Below class allows processors from a pool from multiprocessing module to
create processor pools of their own.
# http://mindcache.io/2015/08/09/python-multiprocessing-module-daemonic-processes-are-not-allowed-to-have-children.html
class NoDaemonProcess(multiprocessing.Process):
# make 'daemon' attribute always return False
def _get_daemon(self):
return False
def _set_daemon(self, value):
pass
daemon = property(_get_daemon, _set_daemon)
# We sub-class multiprocessing.pool.Pool instead of multiprocessing.Pool
# because the latter is only a wrapper function, not a proper class.
class NoDaemonProcessPool(multiprocessing.pool.Pool):
Process = NoDaemonProcess
"""
# above code was broken when switching to python 3. Below is taken from:
# https://stackoverflow.com/questions/6974695/python-process-pool-non-daemonic/8963618#8963618
class NoDaemonProcess(multiprocessing.Process):
@property
def daemon(self):
return False
@daemon.setter
def daemon(self, value):
pass
class NoDaemonContext(type(multiprocessing.get_context())):
Process = NoDaemonProcess
# We sub-class multiprocessing.pool.Pool instead of multiprocessing.Pool
# because the latter is only a wrapper function, not a proper class.
class NoDaemonProcessPool(multiprocessing.pool.Pool):
def __init__(self, *args, **kwargs):
kwargs['context'] = NoDaemonContext()
super(NoDaemonProcessPool, self).__init__(*args, **kwargs)
# Exception wrapper for multiprocessing taken from
# https://stackoverflow.com/questions/6126007/python-getting-a-traceback-from-a-multiprocessing-process/26096355#26096355
class ExceptionWrapper(object):
def __init__(self, ee, exc):
self.ee = ee
self.exc = exc
__, __, self.tb = sys.exc_info()
def re_raise(self):
print(self.exc)
raise self.ee.with_traceback(self.tb)
###############################################################
# Region prep related functions
###############################################################
def coordinate_to_target(coordinates, snp_locations, capture_size):
""" Create MIP targets starting from a snp file that is produced offline,
    usually from Annovar. This is a tab-separated file with the following
    content: chr1 2595307 2595307 A G rs3748816.
This can be generalized to any target with coordinates.
"""
# create target regions to cover all snps
# start by getting snps on same chromosome together
snp_chroms = {}
reference_snp_locations = rsl = coordinates
for r in rsl:
chrom = rsl[r]["chrom"]
try:
snp_chroms[chrom].append([rsl[r]["begin"],
rsl[r]["end"]])
except KeyError:
snp_chroms[chrom] = [[rsl[r]["begin"],
rsl[r]["end"]]]
# merge snps that are too close to get separate regions
# the length should be twice the capture size
merged_snp_chroms = {}
for c in snp_chroms:
merged_snp_chroms[c] = merge_overlap(snp_chroms[c], 2 * capture_size)
# create regions for alignment
for c in merged_snp_chroms:
regions = merged_snp_chroms[c]
for r in regions:
snps_in_region = []
for s in reference_snp_locations:
if ((reference_snp_locations[s]["chrom"] == c)
and (r[0] <= reference_snp_locations[s]["begin"]
<= reference_snp_locations[s]["end"] <= r[1])):
snps_in_region.append(s)
r.append(snps_in_region)
for reg in regions:
snps = reg[2]
reg_begin = reg[0]
reg_end = reg[1]
reg_locations = []
for s in snps:
s_locations = []
locations = snp_locations[s]
ref_location = reference_snp_locations[s]
ref_begin = ref_location["begin"]
ref_end = ref_location["end"]
left_flank_buffer = ref_begin - reg_begin + capture_size
right_flank_buffer = reg_end - ref_end + capture_size
for l in locations:
snp_chrom = l["chrom"]
snp_begin = l["begin"]
snp_end = l["end"]
tar_begin = snp_begin - left_flank_buffer
tar_end = snp_end + right_flank_buffer
s_locations.append([snp_chrom, tar_begin, tar_end])
reg_locations.append(s_locations)
reg.append(reg_locations)
# create target coordinate for each region
target_coordinates = {}
for c in merged_snp_chroms:
regions = merged_snp_chroms[c]
for reg in regions:
region_name = "-".join(reg[2])
region_targets = reg[3][0]
for i in range(len(region_targets)):
reg_name = region_name + "-" + str(i)
if reg_name in target_coordinates:
print((reg_name, " is already in targets!"))
else:
target_coordinates[reg_name] = region_targets[i]
return target_coordinates
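# Minimal input sketch for coordinate_to_target (the rsid and coordinates are
# invented): both dictionaries are keyed by target name; `coordinates` holds
# the single reference placement and `snp_locations` may list several genomic
# placements per target.
def _coordinate_to_target_sketch():
    coords = {"rs0001": {"chrom": "chr1", "begin": 2595307, "end": 2595307}}
    locations = {"rs0001": [{"chrom": "chr1", "begin": 2595307,
                             "end": 2595307}]}
    return coordinate_to_target(coords, locations, capture_size=100)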
def rsid_to_target(resource_dir, snp_file):
""" Create MIP targets starting from a snp file that is produced offline,
usually from Annovar. This is a tab separated file with the following
content: chr1 2595307 2595307 A G rs3748816.
This can be generalized to any target with coordinates.
"""
# one snp can have multiple locations on the reference genome,
# this can happen with snps in regions where there are multiple different
# assemblies (HLA locus, for example). So first step is to get each of
# these locations in the genome.
snp_locations = {}
capture_types = {}
with io.open(os.path.join(resource_dir, snp_file),
encoding="utf-8") as infile:
for line in infile:
newline = line.strip().split("\t")
rsid = newline[5]
try:
# update the location dictionary if the rsid is already present
temp_dic = {"chrom": newline[0],
"begin": int(newline[1]),
"end": int(newline[2]),
"ref_base": newline[3],
"alt_bases": [newline[4]]}
# check if this location is already in the dict
# append the new alternative base to the dict
for snp in snp_locations[rsid]:
if ((snp["begin"] == temp_dic["begin"])
and (snp["end"] == temp_dic["end"])
and (snp["chrom"] == temp_dic["chrom"])
and (snp["ref_base"] == temp_dic["ref_base"])):
snp["alt_bases"].append(temp_dic["alt_bases"][0])
break
else:
# add the snp dict if the location is different than what
# is present in the location dict.
snp_locations[rsid].append(temp_dic)
except KeyError:
# add the new rsid to location dict if it is not already there
snp_locations[rsid] = [temp_dic]
capture_types[rsid] = newline[6]
# one reference location for each snp is required
# alternative assambly chromosomes have an underscore in their names,
# so that will be utilized to get the location in the orignal assembly,
# i.e. the chromosome that does not have the underscore
# (chr7 and not chr7_alt08)
reference_snp_locations = {}
problem_snps = []
for s in snp_locations:
if len(snp_locations[s]) == 1:
reference_snp_locations[s] = snp_locations[s][0]
else:
for i in range(len(snp_locations[s])):
if len(snp_locations[s][i]["chrom"].split("_")) == 1:
reference_snp_locations[s] = snp_locations[s][i]
break
else:
print(("Short chromosome name not found! "
"Please check the output list."))
problem_snps.append(s)
reference_snp_locations[s]["capture_type"] = capture_types[s]
return reference_snp_locations, snp_locations
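# Input-format sketch for rsid_to_target (the SNP line reuses the docstring's
# example plus an assumed capture-type column in the seventh field): each line
# of the snp_file is tab separated, with the rsid in the sixth column.
def _rsid_to_target_input_sketch(res_dir):
    line = "\t".join(["chr1", "2595307", "2595307", "A", "G", "rs3748816",
                      "targets"]) + "\n"
    with io.open(os.path.join(res_dir, "example_snps.txt"), "w",
                 encoding="utf-8") as outfile:
        outfile.write(line)
    return rsid_to_target(res_dir, "example_snps.txt")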
def gene_to_target(gene_list, species):
target_coordinates = {}
for gene in gene_list:
e = get_exons(get_gene(gene,
get_file_locations()[species]["refgene"],
alternative_chr=1))
try:
target_coordinates[gene] = {"chrom": e["chrom"],
"begin": e["begin"],
"end": e["end"]}
except KeyError:
target_coordinates[gene] = {"chrom": np.nan,
"begin": np.nan,
"end": np.nan}
return target_coordinates
def gene_to_target_exons(gene_list, species, exon_list):
target_coordinates = {}
for i in range(len(gene_list)):
gene = gene_list[i]
exons_wanted = exon_list[i]
gene_exons = get_exons(get_gene(gene,
get_file_locations()[species]["refgene"],
alternative_chr=1))
exons = gene_exons["exons"]
if gene_exons["orientation"] == "-":
exons.reverse()
if exons_wanted == "all":
for j in range(len(exons)):
e = exons[j]
tar_name = "-".join([gene, "exon", str(j)])
target_coordinates[tar_name] = {"chrom": gene_exons["chrom"],
"begin": e[0],
"end": e[1]}
else:
for j in exons_wanted:
try:
e = exons[j]
tar_name = "-".join(gene, "exon", str(j))
target_coordinates[tar_name] = {
"chrom": gene_exons["chrom"],
"begin": e[0],
"end": e[1]}
except IndexError:
print(("Exon ", j, " does not exist for gene ", gene))
return target_coordinates
def parse_alignment(reg_file):
""" Create a rinfo dictionary from a rinfo file."""
reg_dic = {}
with open(reg_file, "r") as infile:
for line in infile:
if line.startswith("REGION"):
newline = line.strip().split("\t")
key1 = newline[1].split(":")[0]
key2 = newline[1].split(":")[1]
if key1 not in reg_dic:
reg_dic[key1] = {key2: {"copyname": newline[2],
"chr": int(newline[3][3:]),
"begin": int(newline[4]),
"end": int(newline[5]),
"ori": (newline[6] == "F")}}
else:
reg_dic[key1][key2] = {"copyname": newline[2],
"chr": int(newline[3][3:]),
"begin": int(newline[4]),
"end": int(newline[5]),
"ori": (newline[6] == "F")}
return reg_dic
def update_rinfo_file(rinfo_file, update_file, output_file):
"""Update a rinfo file with the lines provided in the update_file.
This function will read all lines from a rinfo file and an update file.
First two columns of rinfo files describe the parameters while the
rest assign values. All the lines in the update file which share the
first column with a line in the original file will replace that line
in the original file. All other lines in the original file will remain.
"""
# read the update file
update_dict = {}
with open(update_file) as infile:
for line in infile:
if not line.startswith("#"):
newline = line.strip().split("\t")
update_dict[(newline[0], newline[1])] = line
# read the rinfo file and update as appropriate
with open(rinfo_file) as infile, open(output_file, "w") as outfile:
for line in infile:
if not line.startswith("#"):
newline = line.strip().split("\t")
line_key = (newline[0], newline[1])
try:
outfile.write(update_dict[line_key])
except KeyError:
outfile.write(line)
else:
outfile.write(line)
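# Minimal usage sketch for update_rinfo_file, assuming hypothetical file
# paths. Any non-comment line in "updates.rinfo" whose first two columns
# match a line in "original.rinfo" replaces that line in the output file.
#   update_rinfo_file("original.rinfo", "updates.rinfo", "updated.rinfo")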
def get_target_coordinates(res_dir, species, capture_size,
coordinates_file=None, snps_file=None,
genes_file=None):
"""Extract MIP target coordinates from provided files."""
capture_types = {}
# Get target coordinates specified as genomic coordinates
if coordinates_file is None:
region_coordinates = {}
coord_names = []
else:
coordinates_file = os.path.join(res_dir, coordinates_file)
try:
coord_df = pd.read_table(coordinates_file, index_col=False)
coord_names = coord_df["Name"].tolist()
coord_df.rename(columns={"Name": "name", "Chrom": "chrom",
"Start": "begin", "End": "end"}, inplace=True)
region_coordinates = coord_df.set_index("name").to_dict(
orient="index")
# update capture types of targets
for g in region_coordinates:
if g not in capture_types:
capture_types[g] = region_coordinates[g]["CaptureType"]
except IOError:
print(("Target coordinates file {} could not be found.").format(
(coordinates_file)))
region_coordinates = {}
coord_names = []
# Get Gene target coordinates
if genes_file is None:
gene_coordinates = {}
gene_names = []
else:
# get the alias file (gene name to gene id mapping) if available
try:
with open(get_file_locations()[species]["alias"]) as infile:
alias = json.load(infile)
except (KeyError, IOError):
pass
try:
genes_file = os.path.join(res_dir, genes_file)
genes_df = pd.read_table(genes_file, index_col=False)
gene_names = genes_df["Gene"].tolist()
genes = genes_df.set_index("Gene").to_dict(orient="index")
gene_id_to_gene = {}
gene_ids = []
gene_coordinates = {}
for g in genes:
try:
if np.isnan(genes[g]["GeneID"]):
try:
gene_id = alias[g]
genes[g]["GeneID"] = gene_id
except KeyError:
print("""Alias for gene %s is not found.
Either provide a gene ID or use an alias
which is present in refgene file.""" % g)
continue
except NameError:
print(""" Gene ID is not provided for %s.
If gene name will be used to extract gene
ID an alias dictionary must be specified.
""" % g)
continue
except TypeError:
pass
gene_ids.append(genes[g]["GeneID"])
gene_id_to_gene[genes[g]["GeneID"]] = g
capture_types[g] = genes[g]["CaptureType"]
gene_id_coordinates = gene_to_target(gene_ids, species)
for gid in gene_id_coordinates:
gene_coordinates[gene_id_to_gene[gid]] = gene_id_coordinates[
gid]
except IOError:
print(("Target genes file {} could not be found.").format(
(genes_file)))
gene_coordinates = {}
gene_names = []
if snps_file is None:
snp_coordinates = {}
else:
# Get SNP target coordinates
try:
snps_file = os.path.join(res_dir, snps_file)
snp_df = pd.read_table(snps_file, index_col=False,
dtype={"Start": int, "End": int})
snp_df.rename(columns={"Name": "name", "Chrom": "chrom",
"Start": "begin", "End": "end"},
inplace=True)
snp_coordinates = snp_df.set_index("name").to_dict(orient="index")
for g in snp_coordinates:
if g not in capture_types:
capture_types[g] = "targets"
except IOError:
print(("Target SNPs file {} could not be found.").format(
(snps_file)))
snp_coordinates = {}
# merge coordinates dictionaries
all_coordinates = {}
all_coordinates.update(snp_coordinates)
all_coordinates.update(gene_coordinates)
all_coordinates.update(region_coordinates)
    # Fix names that have unwanted characters
for c in list(all_coordinates.keys()):
clist = []
for ch in c:
if ch.isalnum():
clist.append(ch)
else:
clist.append("-")
newc = "".join(clist)
if newc != c:
print("%s is replaced with %s" % (c, newc))
all_coordinates[newc] = all_coordinates.pop(c)
capture_types[newc] = capture_types.pop(c)
target_regions, target_names = merge_coordinates(all_coordinates,
capture_size)
    # prioritize gene names and coordinate names over snp or other names
for t in list(target_names.keys()):
for n in target_names[t]:
if n in gene_names:
target_names[n] = target_names.pop(t)
target_regions[n] = target_regions.pop(t)
break
elif n in coord_names:
target_names[n] = target_names.pop(t)
target_regions[n] = target_regions.pop(t)
break
out_dict = {"target_regions": target_regions,
"target_names": target_names,
"capture_types": capture_types,
"gene_names": gene_names,
"snp_coordinates": snp_coordinates,
"gene_coordinates": gene_coordinates,
"region_coordinates": region_coordinates}
return out_dict
def merge_coordinates(coordinates, capture_size):
"""Merge overlapping coordinates for MIP targets.
Parameters
----------
coordinates: python dictionary
Coordinates to be merged in the form {target-name: {chrom: chrx,
begin: start-coordinate, end: end-coordinate}, ..}
capture_size: int
Anticipated MIP capture size. If two regions are as close as 2 times
this value, they will be merged.
Returns
-------
target_coordinates: python dictionary
merged coordinates dictionary
target_names: python dictionary
names of included targets in each merged region.
"""
# create target regions to cover all snps
# start by getting snps on same chromosome together
chroms = {}
for c in coordinates:
chrom = coordinates[c]["chrom"]
try:
chroms[chrom].append([coordinates[c]["begin"],
coordinates[c]["end"]])
except KeyError:
chroms[chrom] = [[coordinates[c]["begin"],
coordinates[c]["end"]]]
# merge snps that are too close to get separate regions
# the length should be twice the capture size
merged_chroms = {}
for c in chroms:
merged_chroms[c] = merge_overlap(chroms[c], 2 * capture_size)
# create regions for alignment
# create target coordinate for each region
target_coordinates = {}
target_names = {}
for c in merged_chroms:
regions = merged_chroms[c]
for reg in regions:
targets_in_region = []
for co in coordinates:
if (coordinates[co]["chrom"] == c
and reg[0] <= coordinates[co]["begin"]
<= coordinates[co]["end"] <= reg[1]):
targets_in_region.append(co)
region_name = targets_in_region[0]
target_names[region_name] = targets_in_region
r_start = reg[0]
r_end = reg[1]
target_coordinates[region_name] = [c, r_start, r_end]
return target_coordinates, target_names
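# Illustrative example for merge_coordinates (values are hypothetical).
# With capture_size=100, regions closer than 2 * 100 = 200 bp are merged:
#   coords = {"t1": {"chrom": "chr1", "begin": 1000, "end": 1100},
#             "t2": {"chrom": "chr1", "begin": 1250, "end": 1300}}
#   merge_coordinates(coords, 100)
# would return something like
#   ({"t1": ["chr1", 1000, 1300]}, {"t1": ["t1", "t2"]})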
def create_target_fastas(res_dir, targets, species, flank):
""" Create fasta files for a list of region coordinates provided as a dict
in the form {target1: [chrx, start, end], target2: [chrx, start, end], ..},
flank on both sides with the specified length. If beginning coordinate is
    less than zero, reset the beginning coordinate to zero.
"""
for t in list(targets.keys()):
chrom = targets[t][0]
begin = targets[t][1] - flank + 1
if begin < 0:
begin = 0
end = targets[t][2] + flank
rk = chrom + ":" + str(begin) + "-" + str(end)
try:
with open(os.path.join(res_dir, t + ".fa"), "w") as outfile:
outfile.write(get_fasta(rk, species, header=t))
except Exception as e:
print(("Fasta file for {} could not be created, "
"due to error {}. It will be removed"
" from the target list.").format(t, e))
targets.pop(t)
return
def add_fasta_targets(res_dir, fasta_files, fasta_capture_type):
fasta_sequences = {}
capture_types = {}
for f in fasta_files:
f_file = os.path.join(res_dir, f)
try:
fasta_sequences.update(fasta_parser(f_file))
except IOError:
print(("Fasta file {} could not be found.").format(f_file))
for f in list(fasta_sequences.keys()):
flist = []
for fch in f:
if fch.isalnum():
flist.append(fch)
else:
flist.append("-")
newf = "".join(flist)
if f != newf:
print("%s is changed to %s." % (f, newf))
fasta_sequences[newf] = fasta_sequences.pop(f)
if newf not in capture_types:
capture_types[newf] = fasta_capture_type
with open(os.path.join(res_dir, newf + ".fa"), "w") as outfile:
outfile.write(">" + newf + "\n" + fasta_sequences[newf] + "\n")
return {"fasta_sequences": fasta_sequences, "capture_types": capture_types}
def set_genomic_target_alignment_options(target_regions, fasta_sequences,
identity, coverage, flank):
alignment_list = []
fasta_list = list(fasta_sequences.keys()) + list(target_regions.keys())
for t in fasta_list:
temp_dict = {"gene_name": t, "identity": identity}
try:
target_size = target_regions[t][2] - target_regions[t][1]
fasta_size = target_size + 2 * flank
except KeyError:
fasta_size = len(fasta_sequences[t])
cover = round(coverage * 100 / fasta_size, 1)
temp_dict["options"] = []
if cover > 100:
cover = 100
temp_dict["coverage"] = cover
        if fasta_size < 1000:
            temp_dict["options"].extend(["--notransition", "--step=10",
                                         "--ambiguous=iupac"])
        else:
            temp_dict["options"].extend(["--notransition",
                                         "--step=" + str(int(fasta_size/10)),
                                         "--ambiguous=iupac"])
alignment_list.append(temp_dict)
return alignment_list
def align_region_multi(alignment_list, pro):
"""Parallelize a list of lastz alignments."""
p = Pool(pro)
p.map_async(align_region_worker, alignment_list)
p.close()
p.join()
return
def align_region_worker(l):
"""Worker function for align_region_multi.
Aligns a single fasta query file to a target fasta file. Both query
and target fasta files can be multi sequence files.
"""
# get parameters from the input list
# first item is the fasta file name, including file extension
region_key = l[0]
# second item holds the run directory for lastz
resource_dir = l[1]
# output file is the target name + ".al" where the alignment output
# will be saved.
output_file = l[2]
# target fasta file is usually the reference genome
target_fasta = l[3]
# each action item will be appended to the target or query argument
# within brackets. [unmask] and [multiple] are important target actions
# unmask: allows starting alignments in masked(lowercase) parts of the
# target multiple: indicates there are multiple sequences in the target
# file (e.g. chromosomes, contigs)
target_actions = l[4]
# query file is always treated as a multiple sequence file
# so there is no need for the multiple action
query_actions = l[5]
# percent cutoff value for identity/coverage of query to target. This only
# affects reporting and not the alignment process itself.
identity_cutoff = l[6]
coverage_cutoff = l[7]
# format of the output, follows --format: argument in lastz
# if format is general, it should be followed by a comma separated list of
# fields to output, e.g. general:name1,text1,name2,text2,diff,score would
    # output the name and sequence of the target (name1, text1), the name
    # and sequence of the query (name2, text2), a string showing the
    # alignment differences, and the alignment score
output_format = l[8]
# additional options to pass to lastz
options = l[9]
query_fasta = os.path.join(resource_dir, region_key)
# create target actions text
if len(target_actions) > 0:
target_act = "[" + ",".join(target_actions) + "]"
else:
target_act = ""
# create query actions text
if len(query_actions) > 0:
query_act = "[" + ",".join(query_actions) + "]"
else:
query_act = ""
# create the command list to pass to the processor
comm = ["lastz_32",
target_fasta + target_act,
query_fasta + query_act,
"--output=" + os.path.join(resource_dir, output_file),
"--format=" + output_format,
"--filter=identity:" + str(identity_cutoff),
"--filter=coverage:" + str(coverage_cutoff)]
# add any extra options to the end of the command
comm.extend(options)
# run the command using subprocess module
subprocess.check_output(comm)
return
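# For reference, the lastz_32 command assembled above looks roughly like
# the following (paths and cutoff values are hypothetical):
#   lastz_32 genome.fa[multiple,unmask] target1.fa[unmask] \
#       --output=/res_dir/target1.al --format=general:name1,... \
#       --filter=identity:90 --filter=coverage:5 --notransition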
def align_genes_for_design(fasta_list, res_dir,
alignment_types=["differences", "general"],
species="hs", num_processor=30):
"""Perform specified alignments given in an alignment dict.
    This function is called from the align_targets function for the initial
    target alignment to the reference genome.
    It aligns sequences given in an alignment dict which contains alignment
specifics. Each entry in this dict must have a corresponding fasta file in
the res_dir specified. The alignment is performed against the reference
genome. This function merely prepares a list of commands to pass to
align_genes_for_design_worker function to carry out alignments in
parallel where multiple processors are available. Two types of alignment
outputs will be generated; one "general" informative about the alignment
such as where the alignment starts and ends, what is the percent identity,
coverage etc. The second output is the differences between the aligned
sequences, showing at which positions there are nucleotide changes and
what the changes are.
Parameters
----------
fasta_list: list
A list of dictionaries each of which contains specifics
for a single alignment, such as the name of the fasta file, coverage
and identity cut offs and any additional alignment parameters that are
passed to LastZ.
res_dir: str
Path to working directory where input and output files are located.
alignment_types: list
List of alignment types to be performed. Only "general" and/or
"differences" options are allowed.
species: str
Species whose reference genome will be used for alignment.
num_processor: int
Number of processors available for parallel processing.
"""
region_list = []
for gene_dict in fasta_list:
gene_name = gene_dict["gene_name"]
# percent cutoff value for identity/coverage of query to target.
# This only affects reporting and not the alignment process itself.
identity = gene_dict["identity"]
coverage = gene_dict["coverage"]
options = gene_dict["options"]
# alignment target is the reference genome of the specified species.
target = get_file_locations()[species]["fasta_genome"]
# alignment output should have the following fields.
# These are the bare minimum to be able to parse the alignment later.
out_fields = ["name1", "strand1", "zstart1", "end1", "length1",
"name2", "strand2", "zstart2", "end2", "zstart2+",
"end2+", "length2", "identity", "coverage"]
out_fields = ",".join(out_fields)
gen_out = "general:" + out_fields
# output fields for "differences" is fixed; it outputs the differences
# between the aligned sequence and the target.
dif_out = "differences"
if not os.path.exists(res_dir):
os.makedirs(res_dir)
# prepare a list of commands to feed to lastz for both alignment types
# i.e. "general" and "differences". Some of the additional parameters
# we are supplying here are the target and query actions.
# each action item will be appended to the target or query argument
# within brackets. [unmask] and [multiple] are important target actions
# unmask: allows starting alignments in masked(lowercase) parts of the
# target multiple: indicates there are multiple sequences in the target
# file (e.g. chromosomes, contigs)
if "general" in alignment_types:
al = [gene_name + ".fa", res_dir, gene_name + ".al", target,
["multiple", "unmask", "nameparse=darkspace"],
["unmask", "nameparse=darkspace"],
identity, coverage, gen_out, options]
region_list.append(al)
if "differences" in alignment_types:
al = [gene_name + ".fa", res_dir, gene_name + ".differences",
target, ["multiple", "unmask", "nameparse=darkspace"],
["unmask", "nameparse=darkspace"],
identity, coverage, dif_out, options]
region_list.append(al)
align_region_multi(region_list, num_processor)
return
def merge_alignments(resource_dir, fasta_list, output_prefix="merged"):
""" Merge the results of "general" type lastZ alignments into a
single file. This is used to process the alignment results from the
align_genes_for_design function where target sequences are aligned
against the reference genome.
Parameters
----------
resource_dir: str
Path to working directory where the alignment outputs are.
fasta_list: list
A list of dictionaries each of which has the specifics for a single
sequence alignment. It is used only to get alignment file names here.
output_prefix: str
Name for the output file. This will be appended by ".al" extension.
"""
# create a list for each alignment type (general and differences)
als_out = []
with open(os.path.join(
resource_dir, output_prefix + ".al"), "w") as alignment_file:
        fnum = 0
        for f in fasta_list:
            with open(os.path.join(resource_dir, f + ".al")) as alignment:
                linenum = 0
                for line in alignment:
                    if linenum > 0:
                        als_out.append(line.strip())
                    elif fnum == 0:
                        # keep the header (column names) line from the
                        # first file only
                        als_out.append(line.strip())
                        linenum += 1
                    else:
                        linenum += 1
            fnum += 1
alignment_file.write("\n".join(als_out))
return
def merge_alignment_diffs(resource_dir, fasta_list, output_prefix="merged"):
""" Merge the results of "differences" type lastZ alignments into a
single file. This is used to process the alignment results from the
align_genes_for_design function where target sequences are aligned
against the reference genome.
Parameters
----------
resource_dir: str
Path to working directory where the alignment outputs are.
fasta_list: list
A list of dictionaries each of which has the specifics for a single
sequence alignment. It is used only to get alignment file names here.
output_prefix: str
        Name for the output file. This will be appended with the
        ".differences" extension.
"""
# create a list for each alignment type (general and differences)
diffs_out = []
with open(os.path.join(
resource_dir, output_prefix + ".differences"), "w") as diff_file:
        for f in fasta_list:
            with open(os.path.join(resource_dir, f + ".differences")) as diffs:
                for d in diffs:
                    diffs_out.append(d.strip())
diff_file.write("\n".join(diffs_out))
return
def alignment_parser(wdir, name, spacer=0, gene_names=[]):
""" Parse merged genome alignment results file which is generated by
align_genes_for_design function to align design targets to reference
genomes. One query (target region) may have multiple alignments to the
genome.
Parameters
----------
wdir: str
Path to working directory
name: str
File name for the merged alignment file
spacer: int
Spacer length to use when merging overlapping regions. If two regions
are not overlapping but the distance between them is smaller than the
spacer, they will be merged.
Returns
-------
A list of dictionaries:
target_regions: merged genomic coordinates for grouped targets.
This dictionary is used as the final target regions.
For example: {r1: [[chr1, 100, 200], [chr3, 30, 300]],
                      r3: [[chr4, 0, 300]]}
region_names: names for each region.
For example: {r1: [r1, r2], r3: [r3]}
imperfect_aligners: names of the target regions for which a perfect
alignment to the reference genome has not been found.
"""
alignment_dict = {}
# open alignment files
with open(os.path.join(wdir, name + ".al")) as infile:
# each line in the file is a separate alignment for which we'll
# prepare a dictionary.
for line in infile:
newline = line.strip().split("\t")
# first line has column names
if line.startswith("#"):
colnames = [newline[0][1:]]
colnames.extend(newline[1:])
else:
temp_dict = {}
for i in range(len(colnames)):
col = colnames[i]
value = newline[i]
temp_dict[col] = value
query_name = temp_dict["name2"]
try:
alignment_dict[query_name].append(temp_dict)
except KeyError:
alignment_dict[query_name] = [temp_dict]
    # go through each target sequence and each alignment for that
    # target to collect where in the genome it was aligned to.
aligned_regions = {}
for query in alignment_dict:
aligned_regions[query] = []
for a in alignment_dict[query]:
chrom = a["name1"]
begin = int(a["zstart1"])
end = int(a["end1"])
aligned_regions[query].append([chrom, begin, end])
# check for overlapping alignments. These can be the same target aligning
# to overlapping regions in the genome (internal duplications) or
# different targets aligning to the same (or overlapping) regions in the
    # genome (paralogous sequences).
# overlapping regions will be grouped together to form the final target
# regions for probe design.
overlaps = {}
for q1 in aligned_regions:
# each target will have itself as overlapping
overlaps[q1] = [q1]
# get the genomic regions q1 was aligned to
reg1 = aligned_regions[q1]
# go through each region
for r1 in reg1:
# check overlap with other target regions
for q2 in aligned_regions:
if q1 == q2:
continue
reg2 = aligned_regions[q2]
for r2 in reg2:
if check_overlap(r1, r2, spacer):
overlaps[q1].append(q2)
break
    # go through the overlaps and collapse reciprocal entries,
    # e.g. if a overlaps b, b also overlaps a, so we'll have {a: [a, b],
    # b: [b, a]} in the overlaps dict. We want only one of these, so reduce
    # the dict to {a: [a, b]}.
overlap_found = True
    # place a failsafe counter to avoid unforeseen infinite loops
exit_counter = 0
while (overlap_found and (exit_counter < 10000)):
overlap_found = False
for o in list(overlaps.keys()):
# check if o is still in the overlaps and has not been removed
if o in overlaps:
val = overlaps[o]
# get the overlapping regions for "val" and add them
# to overlapping regions for "o", then remove "val"
for v in val:
if (v in overlaps) and (o in overlaps) and (o != v):
overlaps[o].extend(overlaps[v])
overlaps.pop(v)
overlap_found = True
if exit_counter > 9999:
print("Overlap removal while loop limit is reached.")
# clean up overlapping region lists by removing duplicates.
for o in list(overlaps.keys()):
overlaps[o] = sorted(list(set(overlaps[o])))
##########################################################################
# create a new dictionary for target regions.
# for each target group in overlaps, we'll have genomic coordinates
# that will be used as final targets.
##########################################################################
# group regions according to their chromosomes
separated_regions = {}
for o in overlaps:
sep = separated_regions[o] = {}
for g in overlaps[o]:
regs = aligned_regions[g]
for r in regs:
try:
sep[r[0]].append(r[1:])
except KeyError:
sep[r[0]] = [r[1:]]
# merge each overlapping region
separated_merged_regions = {}
for s in separated_regions:
merged_sep = separated_merged_regions[s] = {}
for chrom in separated_regions[s]:
merged_region = merge_overlap(separated_regions[s][chrom])
merged_sep[chrom] = merged_region
###########################################
# organize target regions, assign region names based on the original
# target names. Assign a reference target.
###########################################
    # sort each target's aligned regions by region size, using the length
    # of the chromosome name as a tie-breaker. The tie-breaker prefers
    # short chromosome names so that alternate contigs are not used as the
    # reference region; this only works when contig names follow the usual
    # convention, i.e. chr6 vs chr6_altXYZ.
for ar in list(aligned_regions.keys()):
regs = aligned_regions[ar]
for r in regs:
r.append(0 - len(r[0]))
r.append(r[2] - r[1] + 1)
aligned_regions[ar] = sorted(regs, key=itemgetter(4, 3),
reverse=True)
target_regions = {}
region_names = {}
regions = separated_merged_regions
for r in regions:
target_regions[r] = []
for chrom in regions[r]:
for l in regions[r][chrom]:
temp_region = [chrom]
temp_region.extend(l)
temp_region.append(-len(chrom))
temp_region.append(l[1] - l[0])
target_regions[r].append(temp_region)
# sort target regions per target group based on the length of
# chromosome name and the length of region. Chromosome name is used
# to distinguish alternate contigs and not use them as reference, but
# it is not absolutely necessary and it would not behave as expected
# when chromosome names do not follow that convention, i.e, chr6 and
# chr6_altXYZ
target_regions[r] = sorted(target_regions[r], key=itemgetter(4, 3),
reverse=True)
# assign names to grouped targets
reg_names = []
# for each region we go back to individual region alignments and see
# if the individual alignment overlaps with this region. If it does
# we use the individual regions name for this region within the group.
for i in range(len(target_regions[r])):
reg = target_regions[r][i]
reg_chrom = reg[0]
reg_begin = reg[1]
reg_end = reg[2]
for c in aligned_regions:
main_region = aligned_regions[c][0]
if (reg_chrom == main_region[0]
and reg_begin <= main_region[1]
and reg_end >= main_region[2]):
reg_names.append(c)
break
else:
reg_names.append("na")
# assign a reference region for each group based on gene names provided
        # this is mainly used to give regions better names. For example,
# if a gene is a target as well as a snp, we would like the gene name
# to be the name of the group as opposed to the SNP's name.
ref_found = False
for g in gene_names:
if g in reg_names:
ref_found = True
ref_index = reg_names.index(g)
ref_name = g
break
if not ref_found:
ref_name = r
ref_index = 0
ref_region = target_regions[r].pop(ref_index)
reg_names.pop(ref_index)
target_regions[r] = [ref_region] + target_regions[r]
reg_names = [ref_name] + reg_names
region_names[ref_name] = reg_names
target_regions[reg_names[0]] = target_regions.pop(r)
overlaps[reg_names[0]] = overlaps.pop(r)
# after the alignments are done, some regions will not have proper names
# and some will have "na". We'll change those to avoid repeating
# names.
for r in list(region_names.keys()):
rnames = region_names[r]
nnames = []
rn_counts = {}
for rn in rnames:
rnc = rnames.count(rn)
rn_counts[rn] = {"total_count": rnc,
"used_count": 0}
for rn in rnames:
if rn_counts[rn]["total_count"] > 1:
nnames.append(rn + "-" + str(rn_counts[rn]["used_count"]))
rn_counts[rn]["used_count"] += 1
else:
nnames.append(rn)
region_names[r] = nnames
# find target regions that could not be perfectly aligned to the genome
    # these are usually extragenomic sequences supplied in fasta files, such as
# certain TCR haplotypes.
imperfect_aligners = []
for r in alignment_dict:
best_score = 0
alignments = alignment_dict[r]
for a in alignments:
cov = int(a["covPct"].split(".")[0])
idt = int(a["idPct"].split(".")[0])
score = cov * idt
if score > best_score:
best_score = score
if best_score != 10000:
imperfect_aligners.append(r)
return [target_regions, region_names, imperfect_aligners, aligned_regions,
overlaps]
def set_intra_alignment_options(target_regions, identity, coverage,
max_allowed_indel_size):
"""Set lastZ alignment options for intraparalog_aligner function."""
alignment_options_dict = {}
for t in target_regions:
temp_dict = {"gene_name": t, "identity": identity}
reference_len = target_regions[t][0][-1]
small_target = 0
for r in target_regions[t]:
if r[-1] < coverage:
small_target += 1
try:
smallest_target = min([smallest_target, r[-1]])
except NameError:
smallest_target = int(r[-1])
if small_target > 0:
print(("{} targets within {} are smaller than intra_coverage"
" value. This means that those targets will not be aligned."
" Smallest target's length was {}. Set intra_coverage"
" to a value smaller than this value to align all regions."
).format(small_target, t, smallest_target))
cover = round(coverage * 100 / reference_len, 1)
gap_open_penalty = 400
gap_extend_penalty = 30
ydrop = max_allowed_indel_size * gap_extend_penalty + gap_open_penalty
alignment_opts = ["--ydrop=" + str(ydrop), "--notransition",
"--ambiguous=iupac", "--noytrim"]
temp_dict["options"] = alignment_opts
if cover > 100:
cover = 100
temp_dict["coverage"] = cover
alignment_options_dict[t] = temp_dict
return alignment_options_dict
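# Worked example of the ydrop calculation above: with a hypothetical
# max_allowed_indel_size of 50 and the penalties set in the function
# (gap_open_penalty=400, gap_extend_penalty=30),
# ydrop = 50 * 30 + 400 = 1900, so lastz would be run with --ydrop=1900.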
def intraparalog_aligner(resource_dir,
target_regions,
region_names,
imperfect_aligners,
fasta_sequences,
species,
num_process,
alignment_options_dict={}):
"""Align all regions within a target group.
Align all regions within a target group to the region selected
as the reference region.
Returns
-------
    Returns nothing. It creates .query.fa, .targets.fa and .aligned files for
    each target region group. These alignments have no genomic coordinates, so
all coordinates are relative to the given sequence. Also, the region names
are indicated as the reference gene name + copy name as this is originally
intended for use in paralog genes.
"""
alignment_commands = []
out_fields = "name1,strand1,zstart1,end1,length1,name2,strand2,zstart2,"
out_fields = out_fields + "end2,zstart2+,end2+,length2,identity,coverage"
gen_out = "general:" + out_fields
diff_out = "differences"
for t in target_regions:
alignment_options = alignment_options_dict[t]["options"]
identity = alignment_options_dict[t]["identity"]
coverage = alignment_options_dict[t]["coverage"]
tar_regs = target_regions[t]
# create a fasta file for the reference copy (or reference region)
target_keys = [tr[0] + ":" + str(tr[1] + 1)
+ "-" + str(tr[2]) for tr in tar_regs]
query_key = target_keys[0]
with open(os.path.join(resource_dir, t + ".query.fa"), "w") as outfile:
outfile.write(">" + t + "_ref\n")
outfile.write(get_sequence(query_key, species))
# create a fasta file that includes all target regions within a group.
with open(os.path.join(
resource_dir, t + ".targets.fa"), "w") as outfile:
outfile_list = []
for i in range(len(target_keys)):
k = target_keys[i]
cname = "_C" + str(i)
outfile_list.append(">" + t + cname)
outfile_list.append(get_sequence(k, species))
# add extragenomic (i.e. imperfect_aligners)
ols = region_names[t]
o_count = 0
for o in ols:
if o in imperfect_aligners:
outfile_list.append(">" + t + "_X" + str(o_count))
outfile_list.append(fasta_sequences[o])
o_count += 1
outfile.write("\n".join(outfile_list))
comm = [t + ".query.fa", resource_dir, t + ".aligned",
os.path.join(resource_dir, t + ".targets.fa"),
["multiple", "unmask", "nameparse=darkspace"],
["unmask", "nameparse=darkspace"],
identity, coverage, gen_out,
alignment_options, species]
alignment_commands.append(comm)
comm = [t + ".query.fa", resource_dir,
t + ".differences",
os.path.join(resource_dir, t + ".targets.fa"),
["multiple", "unmask", "nameparse=darkspace"],
["unmask", "nameparse=darkspace"],
identity, coverage,
diff_out, alignment_options, species]
alignment_commands.append(comm)
return align_region_multi(alignment_commands, num_process)
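# Illustrative sketch of the files intraparalog_aligner writes for a
# hypothetical target group "GENE1":
#   GENE1.query.fa   : >GENE1_ref            (reference copy sequence)
#   GENE1.targets.fa : >GENE1_C0, >GENE1_C1  (paralog copies)
#                      >GENE1_X0             (extragenomic fasta sequences)
#   GENE1.aligned / GENE1.differences : lastz outputs
# intra_alignment_checker below relies on the "_C<index>" suffix.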
def intra_alignment_checker(family_name, res_dir, target_regions,
region_names):
"""
Parse intraparalog_aligner results.
Following a within group alignment, check if any individual region
within the group has multiple aligned parts. If found, split that region
into multiple regions to be re-aligned by intraparalog_aligner.
"""
alignment_file = family_name + ".aligned"
new_regions = {}
with open(os.path.join(res_dir, alignment_file), "r") as alignment:
for line in alignment:
# extract the column names from the first line
if line.startswith("#"):
newline = line.strip().split("\t")
newline[0] = newline[0][1:]
colnames = list(newline)
# assign values of each column for each alignment
else:
newline = line.strip().split("\t")
temp_dict = {}
for i in range(len(colnames)):
temp_dict[colnames[i]] = newline[i]
alignment_id = temp_dict["name1"]
ci = alignment_id.split("_")[-1]
ct = ci[0]
if ct == "C":
cn = int(ci[1:])
tr = target_regions[cn]
start = tr[1] + int(temp_dict["zstart1"])
end = tr[1] + int(temp_dict["end1"])
size = end - start + 1
try:
new_regions[cn].append([tr[0], start, end,
0 - len(tr[0]), size])
except KeyError:
new_regions[cn] = [[tr[0], start, end,
0 - len(tr[0]), size]]
# check if any paralog is missing after aligning to the reference copy
targeted_copies = list(range(len(target_regions)))
missing_copies = set(targeted_copies).difference(new_regions.keys())
if len(missing_copies) > 0:
print(("Paralog copies {} were not successfully aligned to "
"the reference copy for the target {}. You may consider "
"relaxing the alignment filters '--local-coverage' "
"and '--local-identity'").format(
", ".join(map(str, sorted(missing_copies))), family_name))
ret_regions = []
rnames = []
for ci in sorted(new_regions):
ret_regions.extend(sorted(new_regions[ci]))
if len(new_regions[ci]) > 1:
print(("Paralog copy {} for target region {} was aligned "
"to the reference copy multiple times. This copy will "
"be treated as multiple independent paralog copies and "
"realigned to the reference copy as separate "
"targets.").format(ci, family_name))
for i in range(len(new_regions[ci])):
rnames.append(region_names[ci] + "-" + str(i))
else:
rnames.append(region_names[ci])
return [ret_regions, rnames]
def align_paralogs(res_dir, target_regions, region_names, imperfect_aligners,
fasta_sequences, species, identity, coverage,
max_allowed_indel_size, num_process):
alignment_options = set_intra_alignment_options(
target_regions, identity, coverage, max_allowed_indel_size)
intraparalog_aligner(res_dir, target_regions, region_names,
imperfect_aligners, fasta_sequences, species,
num_process, alignment_options)
for r in target_regions.keys():
ntr = intra_alignment_checker(r, res_dir, target_regions[r],
region_names[r])
target_regions[r] = ntr[0]
region_names[r] = ntr[1]
alignment_options = set_intra_alignment_options(
target_regions, identity, coverage, max_allowed_indel_size)
intraparalog_aligner(res_dir, target_regions, region_names,
imperfect_aligners, fasta_sequences, species,
num_process, alignment_options)
def get_missed_targets(original_target_regions, target_regions,
aligned_regions, min_target_size, flank, capture_types):
org_chroms = {}
new_chroms = {}
for o in original_target_regions:
org_regs = original_target_regions[o]
for org in org_regs:
try:
org_chroms[org[0]].append(org[1:3])
except KeyError:
org_chroms[org[0]] = [org[1:3]]
new_regs = target_regions[o]
for nrg in new_regs:
try:
new_chroms[nrg[0]].append(nrg[1:3])
except KeyError:
new_chroms[nrg[0]] = [nrg[1:3]]
uncovered_chroms = {}
for chrom in org_chroms:
try:
uncov = subtract_overlap(org_chroms[chrom], new_chroms[chrom])
if len(uncov) > 0:
uncovered_chroms[chrom] = uncov
except KeyError:
uncovered_chroms[chrom] = org_chroms[chrom]
not_aligned_coordinates = {}
for ar in aligned_regions:
main_region = aligned_regions[ar][0]
extra_count = 0
for uc in uncovered_chroms:
unc_regs = uncovered_chroms[uc]
for ur in unc_regs:
                if len(overlap(main_region[1:3], ur)) > 0:
                    not_aligned_coordinates[
                        ar + "-extra-" + str(extra_count)
                    ] = {"chrom": uc,
                         "begin": ur[0],
                         "end": ur[1]}
                    extra_count += 1
missed_target_regions, missed_target_names = merge_coordinates(
not_aligned_coordinates, flank)
for t in list(missed_target_regions.keys()):
target_size = (missed_target_regions[t][-1]
- missed_target_regions[t][-2] + 1)
if target_size < min_target_size:
missed_target_regions.pop(t)
missed_target_names.pop(t)
missed_capt_types = {}
for t in missed_target_names:
try:
missed_capt_types[t] = capture_types[t.split("extra")[0][:-1]]
except KeyError:
print(("Capture type not found for {}."
" Setting capture type to 'whole'").format(t))
missed_capt_types[t] = "whole"
return [missed_target_regions, missed_target_names, missed_capt_types]
def align_targets(res_dir, target_regions, species, flank, fasta_files,
fasta_capture_type, genome_identity, genome_coverage,
num_process, gene_names, max_allowed_indel_size,
intra_identity, intra_coverage, capture_types,
min_target_size, merge_distance, savefile):
# create fasta files for each target coordinate
create_target_fastas(res_dir, target_regions, species, flank)
if fasta_files is None:
fasta_sequences = fasta_capture_types = {}
else:
# add target sequences provided by fasta files
fasta_targets = add_fasta_targets(
res_dir, fasta_files, fasta_capture_type=fasta_capture_type)
fasta_sequences = fasta_targets["fasta_sequences"]
fasta_capture_types = fasta_targets["capture_types"]
capture_types.update(fasta_capture_types)
# create a list of target names from all sources
targets_list = (list(target_regions.keys())
+ list(fasta_sequences.keys()))
# align target sequences to reference genome
# create alignment options
genomic_alignment_list = set_genomic_target_alignment_options(
target_regions, fasta_sequences, genome_identity, genome_coverage,
flank)
# perform genome alignment
align_genes_for_design(genomic_alignment_list, res_dir,
alignment_types="general", species=species,
num_processor=num_process)
# merge all alignment files
merge_alignments(res_dir, targets_list, output_prefix="merged")
# parse genome alignment file
# negative merge_distance values keep the target regions separate
# even if they overlap. Positive values lead to merging targets.
# However, the alignments are already carried out with flanking
# sequence so increasing that merge distance is avoided by setting the
# merge_distance 0 here for positive values.
if merge_distance > 0:
merge_distance = 0
genome_alignment = alignment_parser(res_dir, "merged",
spacer=merge_distance,
gene_names=gene_names)
target_regions = copy.deepcopy(genome_alignment[0])
region_names = copy.deepcopy(genome_alignment[1])
imperfect_aligners = copy.deepcopy(genome_alignment[2])
aligned_regions = copy.deepcopy(genome_alignment[3])
overlaps = copy.deepcopy(genome_alignment[4])
# align sequences within target groups (paralog sequences)
align_paralogs(res_dir, target_regions, region_names, imperfect_aligners,
fasta_sequences, species, intra_identity, intra_coverage,
max_allowed_indel_size, num_process)
# compare original target_regions to the final target regions
# to determine if any region is missing due to alignments performed
original_target_regions = genome_alignment[0]
missed_target_regions, missed_target_names, missed_capture_types = (
get_missed_targets(original_target_regions, target_regions,
aligned_regions, min_target_size, flank,
capture_types))
out_dict = {"original_target_regions": genome_alignment[0],
"original_region_names": genome_alignment[1],
"original_imperfect_aligners": genome_alignment[2],
"original_aligned_regions": genome_alignment[3],
"original_overlaps": genome_alignment[4],
"target_regions": target_regions,
"region_names": region_names,
"aligned_regions": aligned_regions,
"capture_types": capture_types,
"imperfect_aligners": imperfect_aligners,
"overlaps": overlaps,
"missed_target_regions": missed_target_regions,
"missed_target_names": missed_target_names,
"missed_capture_types": missed_capture_types}
with open(os.path.join(res_dir, savefile), "w") as outfile:
json.dump(out_dict, outfile, indent=1)
return out_dict
def alignment_mapper(family_name, res_dir):
"""Create a coordinate map of within group alignments."""
alignment_file = family_name + ".aligned"
difference_file = family_name + ".differences"
with open(os.path.join(res_dir, alignment_file), "r") as alignment, open(
os.path.join(res_dir, difference_file), "r") as difference:
# create an alignment dictionary for each region that a query
# aligns to these correspond to each line in the alignment file
# and thus, are relative coordinates.
alignment_dic = {}
for line in alignment:
# extract the column names from the first line
if line.startswith("#"):
newline = line.strip().split("\t")
newline[0] = newline[0][1:]
colnames = list(newline)
# assign values of each column for each alignment
else:
newline = line.strip().split("\t")
temp_dict = {"differences": []}
for i in range(len(colnames)):
temp_dict[colnames[i]] = newline[i]
alignment_id = temp_dict["name1"]
if alignment_id in alignment_dic:
print(("{} aligned to the reference copy multiple times. "
"Only the first alignment will be used for "
"coordinate mapping.").format(alignment_id))
continue
alignment_dic[alignment_id] = temp_dict
cov = float(alignment_dic[alignment_id]["covPct"][:-1])
idt = float(alignment_dic[alignment_id]["idPct"][:-1])
alignment_dic[alignment_id]["score"] = np.mean([idt, cov])
# differences file is a continuous file for all alignments
# extract differences for each alignment
for line in difference:
newline = line.strip().split("\t")
dname = newline[0]
alignment_dic[dname]["differences"].append(newline[:-2])
# map each position in each alignment to the query
for a in alignment_dic:
snps = alignment_dic[a]["snps"] = {}
co = alignment_dic[a]["coordinates"] = {}
rev_co = alignment_dic[a]["reverse_coordinates"] = {}
# if alignment on reverse strand
if alignment_dic[a]["strand2"] == "-":
# genomic coordinate of target start
# this position is zstart2+ away from query end
# (when it is a - alignment)
al_start = int(alignment_dic[a]["zstart1"])
query_plus_end = int(alignment_dic[a]["end2+"])
# assign start to the first key of the coord dictionary
first_key = query_plus_end - 1
co[first_key] = al_start
rev_co[al_start] = first_key
last_key = first_key
inserted = 0
for d in alignment_dic[a]["differences"]:
# start/end coordinates of diff relative to the query
diff_start = int(d[6])
diff_end = int(d[7])
query_length = int(d[9])
# for each diff, fill in the coordinates
# between the last_key in the coord dic and
# start_key - diff start
for j in range(last_key - 1, query_length
- diff_start - 1, -1):
# j decreases by one, starting from the last
# available key the value will be 1 more than the
# previous key (j+1)
if j == last_key - 1:
co[j] = round(co[j + 1] - 0.1) + 1 + inserted
else:
co[j] = round(co[j + 1] - 0.1) + 1
rev_co[co[j]] = j
# current last key is now first_key - diff_start
last_key = query_length - diff_start - 1
query_diff_end = last_key + 1
# genomic coordinate of target at diff start
tar_start = int(d[1])
# genomic coordinate of target at diff end
tar_end = int(d[2])
# if end and start are the same, there is a deletion
# in target compared to query
# all nucleotides from diff start to diff end will have
# the same coordinate
if tar_start == tar_end:
inserted = 0
for i in range(diff_end - diff_start):
co[last_key - i] = tar_start - 0.5
last_key -= diff_end - diff_start - 1
# in cases of deletion in query, only rev_co will be
# updated
elif diff_start == diff_end:
inserted = 0
for i in range(tar_end - tar_start):
rev_co[co[last_key + 1] + i + 1] = (
last_key + 0.5)
inserted += 1
last_key += 1
# last_key will be mapped to target start
# if there is only a SNP and no indel
else:
inserted = 0
co[last_key] = tar_start
rev_co[tar_start] = last_key
query_diff_start = last_key
diff_key = str(query_diff_start) + "-" + str(
query_diff_end)
snps[diff_key] = {"chrom": d[0],
"target_begin": int(d[1]),
"target_end": int(d[2]),
"target_orientation": d[3],
"query_start": diff_start,
"query_end": diff_end,
"query_orientation": d[8],
"target_base": d[10],
"query_base": d[11]}
# fill in the coordinates between last diff
# and the alignment end
query_plus_start = int(alignment_dic[a]["zstart2+"])
for k in range(last_key - 1, query_plus_start - 1, -1):
co[k] = round(co[k+1] - 0.1) + 1
rev_co[co[k]] = k
# when the alignment is on the forward strand
else:
# where on target sequence the alignment starts
tar_start = int(alignment_dic[a]["zstart1"])
                # where in the query sequence the alignment starts
q_start = int(alignment_dic[a]["zstart2"])
co[q_start] = tar_start
rev_co[tar_start] = q_start
# last key used is q_start, last key is updated each time
# something is added to the coordinate dict.
last_key = first_key = q_start
inserted = 0
for d in alignment_dic[a]["differences"]:
# where on query sequence the difference starts and
# ends
diff_start = int(d[6])
diff_end = int(d[7])
diff_key = d[6] + "-" + d[7]
query_length = d[9]
snps[diff_key] = {"chrom": d[0],
"target_begin": int(d[1]),
"target_end": int(d[2]),
"target_orientation": d[3],
"query_start": diff_start,
"query_end": diff_end,
"query_orientation": d[8],
"target_base": d[10],
"query_base": d[11]}
# from the last key to the diff start the query and
# target sequences are the same in length and co dict
# is filled so
for i in range(last_key + 1, diff_start):
if i == last_key + 1:
co[i] = round(co[i-1] - 0.1) + 1 + inserted
inserted = 0
else:
co[i] = round(co[i-1] - 0.1) + 1
rev_co[co[i]] = i
# update last used key in co dict
last_key = diff_start
# genomic coordinate of target at diff start
tar_start = int(d[1])
# genomic coordinate of target at diff end
tar_end = int(d[2])
# if end and start are the same, there is a deletion
# in target compared to query
# all nucleotides from diff start to diff end will have
# the same coordinate
if tar_start == tar_end:
inserted = 0
for i in range(diff_end - diff_start):
co[last_key + i] = tar_start - 0.5
last_key += diff_end - diff_start - 1
# in cases of deletion in query (insertion in target)
# position will be mapped to the target end coordinate
elif diff_start == diff_end:
inserted = 0
for i in range(tar_end - tar_start):
rev_co[co[last_key - 1] + 1 + i] = (
last_key - 0.5)
inserted += 1
last_key -= 1
# if there is no indel
# last_key will be mapped to target start
else:
inserted = 0
co[last_key] = tar_start
rev_co[tar_start] = last_key
# fill in the coordinates between last diff
# and the alignment end
q_end = int(alignment_dic[a]["end2"])
for k in range(last_key + 1, q_end):
co[k] = round(co[k-1] - 0.1) + 1
rev_co[co[k]] = k
return alignment_dic
###############################################################
# Design related functions
###############################################################
def order_mips(mip_info, design_name, res_dir):
mip_sequences = []
for g in sorted(mip_info):
for m in sorted(mip_info[g]["mips"]):
minfo = mip_info[g]["mips"][m]["mip_dic"]["mip_information"]
for c in minfo:
s = minfo[c]["SEQUENCE"]
n = m + "_" + c
num = int(m.split("_")[-1][3:])
mip_sequences.append([n, s, g, num, m, c])
if len(mip_info[g]["mips"]) == 0:
mip_info.pop(g)
mip_sequences = sorted(mip_sequences, key=itemgetter(2, 3))
print("%d probes will be ordered." % len(mip_sequences))
# Check for probes that have the same sequence
sequence_only = [i[1].upper() for i in mip_sequences]
for s in sequence_only:
if sequence_only.count(s) > 1:
print("At least two probes share the sequence %s" % s)
rows = ["A", "B", "C", "D", "E", "F", "G", "H"]
columns = list(range(1, 13))
    for i in range(len(mip_sequences)):
        m = mip_sequences[i]
        # use integer division to get the plate number and the row index
        plate = i // 96
        pl_pos = i % 96
        col = columns[pl_pos % 12]
        row = rows[pl_pos // 12]
        m.extend([row, col, plate])
    for i in range(len(mip_sequences)):
        m = mip_sequences[i]
        s = list(m[1])
        N_found = False
        for j in range(len(s)):
            if s[j] == "N":
                if N_found:
                    s[j] = "(N)"
                else:
                    N_found = True
                    s[j] = "(N:25252525)"
m.append("".join(s))
order_dict = {}
for i in range(len(mip_sequences)):
m = mip_sequences[i]
pl = m[-2]
pl_name = design_name + "_" + str(pl)
try:
order_dict[pl_name].append(m)
except KeyError:
order_dict[pl_name] = [m]
for o in order_dict:
with open(os.path.join(res_dir, o), "w") as outfile:
outfile_list = ["\t".join(["WellPosition", "Name", "Sequence"])]
plate_mips = order_dict[o]
for m in plate_mips:
wp = m[-4] + str(m[-3])
outfile_list.append("\t".join([wp, m[0], m[-1]]))
outfile.write("\n".join(outfile_list))
return
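# Worked example of the 96-well plate layout used in order_mips (indices
# are hypothetical): probe index i=13 gives plate = 13 // 96 = 0,
# pl_pos = 13, column = columns[13 % 12] = 2 and row = rows[13 // 12] = "B",
# i.e. well "B2" on plate 0; i=96 starts plate 1 at well "A1".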
def create_dirs(dir_name):
""" create subdirectory names for a given dir,
to be used by os.makedirs, Return a list of
subdirectory names."""
primer3_input_DIR = dir_name + "/primer3_input_files/"
primer3_output_DIR = dir_name + "/primer3_output_files/"
bowtie2_input_DIR = dir_name + "/bowtie2_input/"
bowtie2_output_DIR = dir_name + "/bowtie2_output/"
mfold_input_DIR = dir_name + "/mfold_input/"
mfold_output_DIR = dir_name + "/mfold_output/"
return [primer3_input_DIR, primer3_output_DIR, bowtie2_input_DIR,
bowtie2_output_DIR, mfold_input_DIR, mfold_output_DIR]
def get_snps(region, snp_file):
""" Take a region string and a tabix'ed snp file,
return a list of snps which are lists of
tab delimited information from the snp file. """
# extract snps using tabix, in tab separated lines
snp_temp = subprocess.check_output(["tabix", snp_file, region]).decode(
"UTF-8"
)
# split the lines (each SNP)
snps_split = snp_temp.split("\n")
# add each snp in the region to a list
# as lists of
snps = []
for line in snps_split:
snp = line.split('\t')
snps.append(snp)
# remove last item which is coming from the new line at the end
del snps[-1]
return snps
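# Minimal usage sketch for get_snps, assuming a hypothetical tabix-indexed
# file; each returned item is one tab-split line of that file:
#   snps = get_snps("chr1:100000-101000", "/path/to/snps.vcf.gz")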
def get_vcf_snps(region, snp_file):
""" Take a region string and a tabix'ed snp file,
return a list of snps which are lists of
tab delimited information from the snp file. """
# extract snps using tabix, in tab separated lines
snp_temp = subprocess.check_output(["bcftools", "view", "-H", "-G", "-r",
region, snp_file]).decode("UTF-8")
# split the lines (each SNP)
snps_split = snp_temp.split("\n")[:-1]
# add each snp in the region to a list
# as lists of
snps = []
for line in snps_split:
snp = line.split('\t')[:8]
snps.append(snp)
return snps
def get_exons(gene_list):
""" Take a list of transcript information in refgene format and return a
    list of exons in the region as [[e1_start, e1_end], [e2_start, e2_end],
    ..]. The transcripts must belong to the same gene (i.e. have the same
    gene name). Merge overlapping exons.
"""
# get start and end coordinates of exons in gene list
starts = []
ends = []
gene_names = []
gene_ids = []
chrom_list = []
for gene in gene_list:
chrom_list.append(gene[2])
chrom_set = list(set(chrom_list))
if len(chrom_set) == 0:
return {}
chrom_set = [c for c in chrom_set if len(c) < 6]
if len(chrom_set) > 1:
print(("More than one chromosomes, ",
chrom_set,
", has specified gene ",
gene[12]))
return {}
chrom = chrom_set[0]
for gene in gene_list:
if gene[2] == chrom:
starts.extend(list(map(int, gene[9].split(",")[:-1])))
ends.extend(list(map(int, gene[10].split(",")[:-1])))
gene_names.append(gene[12])
gene_ids.append(gene[1])
ori = gene[3]
# pair exon starts and ends
exons = []
for i in range(len(starts)):
exons.append([starts[i], ends[i]])
# check for overlapping exons and merge if any
overlapping = 1
while overlapping:
overlapping = 0
for i in range(len(exons)):
e = exons[i]
for j in range(len(exons)):
x = exons[j]
if (i != j) and ((e[0] <= x[0] <= e[1])
or (e[0] <= x[1] <= e[1])
or (x[0] <= e[0] <= x[1])):
# merge exons and add to the exon list
exons.append([min(e[0], x[0]), max(e[1], x[1])])
# remove the exons e and x
exons.remove(e)
exons.remove(x)
# change overlapping to 1 so we can stop the outer for loop
overlapping = 1
# once an overlapping exon is found, break the for loop
break
if overlapping:
# if an overlapping exon is found, stop this for loop and
# continue with the while loop with the updated exon list
break
# get the gene start and end coordinates
if (len(starts) >= 1) and (len(ends) >= 1):
start = min(starts)
end = max(ends)
else:
print(("No exons found for ", gene_list[0][1]))
return {}
# create an output dict
out = {}
out["chrom"] = chrom
out["begin"] = start + 1
out["end"] = end
out["exons"] = [[e[0] + 1, e[1]] for e in sorted(exons, key=itemgetter(0))]
out["names"] = gene_names
out["ids"] = gene_ids
out["orientation"] = ori
return out
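# Doctest-style sketch for get_exons using two hypothetical refgene rows
# (fields follow the layout described in the get_gene docstring below);
# the overlapping exons [100, 200] and [150, 300] are merged:
#   tx1 = [0, "NM_1", "chr1", "+", 100, 500, 100, 500, 2,
#          "100,400,", "200,500,", 0, "GENE1"]
#   tx2 = [0, "NM_2", "chr1", "+", 150, 300, 150, 300, 1,
#          "150,", "300,", 0, "GENE1"]
#   get_exons([tx1, tx2])["exons"]  ->  [[101, 300], [401, 500]]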
def get_gene_name(region, species):
""" Return the gene(s) in a region. """
gene_names = []
try:
genes = get_snps(region, get_file_locations()[species][
"refgene_tabix"])
for g in genes:
gene_names.append(g[12])
except KeyError:
pass
return gene_names
def get_gene(gene_name, refgene_file, chrom=None, alternative_chr=1):
""" Return genomic coordinates of a gene extracted from the refseq genes file.
Refgene fields are as follows:
0:bin, 1:name, 2:chrom, 3:strand, 4:txStart, 5:txEnd, 6:cdsStart, 7:cdsEnd,
8:exonCount, 9:exonStarts, 10:exonEnds, 11:score, 12:name2,
13:cdsStartStat, 14:cdsEndStat, 15:exonFrames.
Field 12 will be used for name search."""
# all chromosomes must be included if chromosome of the gene is not
# provided therefore, chrom cannot be None when alternative_chr is set to 0
if not (chrom or alternative_chr):
print(("Chromosome of the gene %s must be specified "
"or all chromosomes must be searched."))
print(("Specify a chromosome or set alternative chromosome to 1."
% gene_name))
return 1
with open(refgene_file, 'r') as infile:
coord = []
for line in infile:
if not line.startswith('#'):
newline = line.strip().split('\t')
if newline[12] == gene_name:
coord.append(newline)
if len(coord) < 1:
print(("No gene found with the name ", gene_name))
return []
alter = []
if chrom:
# add each gene to alter dict, in the corresponding chromosome key
for c in coord:
if c[2] == chrom:
alter.append(c)
# find genes on alternate chromosomes if requested
elif alternative_chr:
for c in coord:
alter.append(c)
return alter
def create_gene_fasta(gene_name_list, wdir, species="hs", flank=150,
multi_file=False):
""" Get a list of genes, extract exonic sequence + flanking sequence.
Create fasta files in corresponding directory for each gene if multi_file
is True, create a single fasta file if False.
"""
region_list = []
for gene_name in gene_name_list:
if gene_name.startswith("chr"):
coord = get_coordinates(gene_name)
query = make_region(coord[0], coord[1] - flank, coord[2] + flank)
else:
e = get_exons(
get_gene(gene_name, get_file_locations()[species]["refgene"],
alternative_chr=1)
)
query = e["chrom"] + ":" + str(e["begin"] - flank) + "-" + str(
e["end"] + flank)
region_list.append(query)
regions = get_fasta_list(region_list, species)
fasta_dict = {}
for i in range(len(region_list)):
r = region_list[i]
gene_name = gene_name_list[i]
fasta_dict[gene_name] = regions[r]
if multi_file:
for gene_name in fasta_dict:
save_dict = {gene_name: fasta_dict[gene_name]}
filename = os.path.join(wdir, gene_name + ".fa")
save_fasta_dict(save_dict, filename)
else:
save_fasta_dict(fasta_dict, os.path.join(wdir, "multi.fa"))
def get_region_exons(region, species):
try:
genes = get_snps(region, get_file_locations()[species][
"refgene_tabix"])
except KeyError:
genes = []
return get_exons(genes)
def get_cds(gene_name, species):
gene_list = get_gene(gene_name,
get_file_locations()[species]["refgene"],
alternative_chr=1)
if len(gene_list) > 1:
print(("More than one refgene entry was found for the gene ",
gene_name))
print(("Exons from alternative transcripts will be merged "
"and CDS will be generated from that."))
print("This may lead to unreliable CDS sequence information.")
if len(gene_list) == 0:
return {}
g = gene_list[0]
cds = {"chrom": g[2],
"orientation": g[3],
"begin": int(g[6]) + 1,
"end": int(g[7])}
exons = get_exons(gene_list)["exons"]
exons_nuc = []
for i in range(len(exons)):
e = exons[i]
if not e[0] <= cds["begin"] <= e[1]:
            exons[i] = "remove"
else:
e[0] = cds["begin"]
break
exons = [i for i in exons if i != "remove"]
for i in range(-1, -1 * len(exons), -1):
e = exons[i]
if not e[0] <= cds["end"] <= e[1]:
exons[i] = "remove"
else:
e[1] = cds["end"]
break
exons = [i for i in exons if i != "remove"]
sequences = []
for e in exons:
exons_nuc.extend(list(range(e[0], e[1] + 1)))
sequences.append(fasta_to_sequence(
get_fasta(cds["chrom"]
+ ":" + str(e[0]) + "-"
+ str(e[1]), species)))
coord = {}
if cds["orientation"] == "+":
cds["sequence"] = "".join(sequences)
for i in range(len(exons_nuc)):
coord[i] = exons_nuc[i]
else:
cds["sequence"] = reverse_complement("".join(sequences))
rev_exons = list(reversed(exons_nuc))
for i in range(len(exons_nuc)):
coord[i] = rev_exons[i]
cds["coordinates"] = coord
return cds
def make_boulder(fasta, primer3_input_DIR, exclude_list=[],
output_file_name="", sequence_targets=[]):
""" Create a boulder record file in primer3_input_DIR from a given fasta
STRING. SEQUENCE_ID is the fasta header, usually the genomic region
(chrX:m-n) exclude_list is [coordinate,length] of any regions primers
cannot overlap.
"""
# parse fasta string, get header and remove remaining nextlines.
fasta_list = fasta.split("\n")
fasta_head = fasta_list[0][1:]
seq_template = "".join(fasta_list[1:])
# convert exclude list to strings
exclude_string_list = []
exclude_region = ""
for i in exclude_list:
exclude_string_list.append(str(i[0])+","+str(i[1]))
exclude_region = " ".join(exclude_string_list)
# create the boulder record
if len(sequence_targets) == 0:
sequence_target_string = ""
else:
sequence_target_string = " ".join([",".join(map(str, s))
for s in sequence_targets])
boulder = ("SEQUENCE_ID=" + fasta_head + "\n" +
"SEQUENCE_TEMPLATE=" + seq_template + "\n" +
"SEQUENCE_TARGET=" + sequence_target_string + "\n" +
"SEQUENCE_EXCLUDED_REGION=" + exclude_region + "\n" + "=")
if output_file_name == "":
outname = fasta_head
else:
outname = output_file_name
with open(os.path.join(primer3_input_DIR, outname), 'w') as outfile:
outfile.write(boulder)
return boulder
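# Illustrative boulder record produced by make_boulder (header, sequence
# and coordinates are hypothetical):
#   SEQUENCE_ID=chr1:1000-1100
#   SEQUENCE_TEMPLATE=ACGT...
#   SEQUENCE_TARGET=30,10
#   SEQUENCE_EXCLUDED_REGION=50,5
#   =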
def make_primers_worker(l):
"""
    Worker function for make_primers_multi.
    Makes primers for multiple regions using separate processors. Reads a
    boulder record in the given input directory and creates a primer output
    file in the output directory.
"""
# function arguments should be given as a list due to single
    # iterable limitation of the map_async function of multiprocessing.Pool
# input boulder record name
input_file = l[0]
# primer settings used
settings = l[1]
# output file name
output_file = l[2]
# locations of input/output dirs
primer3_input_DIR = l[3]
primer3_output_DIR = l[4]
primer3_settings_DIR = l[5]
subregion_name = l[6]
paralog_name = l[7]
primer_type = l[8]
input_file = os.path.join(primer3_input_DIR, input_file)
output_file = os.path.join(primer3_output_DIR, output_file)
settings = os.path.join(primer3_settings_DIR, settings)
# call primer3 program using the input and settings file
res = subprocess.run(["primer3_core",
"-p3_settings_file=" + settings, input_file],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if res.returncode != 0:
print(("Primer design for the gene {} subregion {} {} arm failed "
"with error {}").format(paralog_name, subregion_name,
primer_type, res.stderr))
return
else:
primer3_output = res.stdout
# write boulder record to file.
with open(output_file, 'w') as outfile:
outfile.write(primer3_output.decode("UTF-8"))
return
def make_primers_multi(ext_list, lig_list, pro):
"""Design primers in parallel using the make_primers_worker function."""
    # create a process pool; extension and ligation jobs share the same pool
    # (an alternative would be a pool of twice the size: p = Pool(2*pro))
    p = Pool(pro)
# make extension primers using extension arm primer settings
p.map_async(make_primers_worker, ext_list)
# make ligation primers using ligation arm primer settings
p.map_async(make_primers_worker, lig_list)
# close pool
p.close()
# wait for processes to finish
p.join()
return
def primer_parser3(input_file, primer3_output_DIR, bowtie2_input_DIR,
parse_out, fasta=1, outp=1):
"""
Parse a primer3 output file and generate a primer fasta file.
    The fasta file, containing only primer names and sequences, is placed
    in the bowtie2 input directory to be used as bowtie2 input.
    Return a dictionary with "sequence_information" and "primer_information"
    keys. The first has tag:value pairs for the input sequence; the second
    has one dict per returned primer, keyed by primer name, with values
    such as {"SEQUENCE": "AGC..", "TM": "58", ...}. Also write this
    dictionary to a json file in primer3_output_DIR.
"""
primer_dic = {}
# all target sequence related information will be placed in
# sequence_information dictionary.
primer_dic["sequence_information"] = {}
# primer information will be kept in primer_information dicts.
primer_dic["primer_information"] = {}
# load the whole input file into a list.
    infile = open(os.path.join(primer3_output_DIR, input_file), 'r')
lines = []
for line in infile:
# if a line starts with "=" that line is a record separator
if not line.startswith("="):
# boulder record tag-value pairs separated by "="
inline = line.strip('\n').split('=')
lines.append(inline)
infile.close()
# find sequence related information and add it to appropriate dic.
for pair in lines:
tag = pair[0]
value = pair[1]
if tag.startswith("SEQUENCE"):
if tag == "SEQUENCE_ID":
new_value = value.split(",")[-1].replace("CHR", "chr")
primer_dic["sequence_information"][tag] = new_value
else:
primer_dic["sequence_information"][tag] = value
# find how many left primers returned and create empty dictionary
# for each primer in primer_information dict.
for pair in lines:
tag = pair[0]
value = pair[1]
if tag == "PRIMER_LEFT_NUM_RETURNED":
# Add this to sequence information dic because it is sequence
# specific information
primer_dic["sequence_information"][
"SEQUENCE_LEFT_NUM_RETURNED"] = value
# create empty dictionaries with primer name keys
for i in range(int(value)):
primer_key = "PRIMER_LEFT_" + str(i)
primer_dic["primer_information"][primer_key] = {}
# do the same for right primers found
for pair in lines:
tag = pair[0]
value = pair[1]
if tag == "PRIMER_RIGHT_NUM_RETURNED":
primer_dic["sequence_information"][
"SEQUENCE_RIGHT_NUM_RETURNED"] = value
for i in range(int(value)):
primer_key = "PRIMER_RIGHT_" + str(i)
primer_dic["primer_information"][primer_key] = {}
# get sequence coordinate information to determine genomic coordinates of
# primers because primer information is relative to template sequence
sequence_coordinates = get_coordinates(primer_dic[
"sequence_information"]["SEQUENCE_ID"])
seq_chr = sequence_coordinates[0]
seq_start = int(sequence_coordinates[1])
# get primer information from input file and add to primer dictionary
for pair in lines:
tag = pair[0]
value = pair[1]
if ((tag.startswith("PRIMER_LEFT_")
or tag.startswith("PRIMER_RIGHT_"))
and (tag != "PRIMER_LEFT_NUM_RETURNED")
and (tag != "PRIMER_RIGHT_NUM_RETURNED")):
attributes = tag.split('_')
# primer coordinates tag does not include an attribute value
# it is only primer name = coordinates, so:
if len(attributes) > 3:
# then this attribute is not coordinates and should have an
# attribute value such as TM or HAIRPIN etc.
primer_name = '_'.join(attributes[0:3])
attribute_value = '_'.join(attributes[3:])
primer_dic["primer_information"][primer_name][
attribute_value] = value
else:
# then this attribute is coordinates and has no attribute value
                # give it an attribute value of "COORDINATES"
primer_name = '_'.join(attributes[0:3])
primer_dic["primer_information"][primer_name][
'COORDINATES'] = value
# the coordinates are relative to sequence template
# find the genomic coordinates
coordinate_values = value.split(",")
if tag.startswith("PRIMER_LEFT"):
# sequence start is added to primer start to get genomic
# primer start
genomic_start = seq_start + int(coordinate_values[0])
# primer len is added "to genomic start because it is a
# left primer
genomic_end = genomic_start + int(coordinate_values[1]) - 1
primer_dic["primer_information"][primer_name][
'GENOMIC_START'] = genomic_start
primer_dic["primer_information"][primer_name][
'GENOMIC_END'] = genomic_end
primer_dic["primer_information"][primer_name][
'CHR'] = seq_chr
primer_dic["primer_information"][primer_name][
'ORI'] = "forward"
else:
# sequence start is added to primer start to get genomic
# primer start
genomic_start = seq_start + int(coordinate_values[0])
# primer len is subtracted from genomic start because it is
# a right primer
genomic_end = genomic_start - int(coordinate_values[1]) + 1
primer_dic["primer_information"][primer_name][
'GENOMIC_START'] = genomic_start
primer_dic["primer_information"][primer_name][
'GENOMIC_END'] = genomic_end
primer_dic["primer_information"][primer_name][
'CHR'] = seq_chr
primer_dic["primer_information"][primer_name][
'ORI'] = "reverse"
# add NAME as a key to primer information dictionary
primer_dic["primer_information"][primer_name]['NAME'] = primer_name
# if some primers were eliminated from initial primer3 output, remove from
# dictionary
for primer in list(primer_dic["primer_information"].keys()):
if primer_dic["primer_information"][primer] == {}:
primer_dic["primer_information"].pop(primer)
# dump the dictionary to json file in primer3_output_DIR if outp parameter
# is true
if outp:
dict_file = open(os.path.join(primer3_output_DIR, parse_out), 'w')
json.dump(primer_dic, dict_file, indent=1)
dict_file.close()
# generate a simple fasta file with primer names
if fasta:
        outfile = open(os.path.join(bowtie2_input_DIR, parse_out), 'w')
for primer in primer_dic["primer_information"]:
# primer name is fasta header and sequence is fasta sequence
fasta_head = primer
fasta_line = primer_dic["primer_information"][primer]["SEQUENCE"]
outfile.write(">" + fasta_head + "\n" + fasta_line + "\n")
outfile.close()
return primer_dic
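# Illustrative sketch (not called by the pipeline): how primer3's relative
# "start,length" COORDINATES are converted to genomic coordinates in
# primer_parser3 above. The numbers are made up.
def _genomic_coordinate_example():
    seq_start = 1000  # genomic start of the template sequence
    left_start, left_len = 50, 20
    right_start, right_len = 300, 20
    # left (forward) primer: the length extends the genomic end
    left_genomic = (seq_start + left_start,
                    seq_start + left_start + left_len - 1)     # (1050, 1069)
    # right (reverse) primer: the length extends toward smaller coordinates
    right_genomic = (seq_start + right_start,
                     seq_start + right_start - right_len + 1)  # (1300, 1281)
    return left_genomic, right_genomic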
def paralog_primers(primer_dict, copies, coordinate_converter, settings,
primer3_output_DIR, outname, species, outp=0):
"""
Process primers generated for paralogs.
Take a primer dictionary file and add genomic start and end coordinates
of all its paralogs.
"""
# uncomment for using json object instead of dic
# load the primers dictionary from file
# with open(primer_file, "r") as infile:
# primer_dic = json.load(infile)
    # primer dict consists of 2 parts: the sequence_information dict
    # and the primer_information dict. We won't change the sequence_info
    # part.
primers = primer_dict["primer_information"]
primer_keys = set()
for primer in list(primers.keys()):
p_name = primer
p_dic = primers[primer]
p_coord = coordinate_converter
p_copies = copies
chroms = p_coord["C0"]["chromosomes"]
start = p_dic["GENOMIC_START"]
end = p_dic["GENOMIC_END"]
ref_coord = p_dic["COORDINATES"]
primer_ori = p_dic["ORI"]
p_dic["PARALOG_COORDINATES"] = {}
primer_seq = p_dic["SEQUENCE"]
# add reference copy as paralog
p_dic["PARALOG_COORDINATES"]["C0"] = {"SEQUENCE": primer_seq,
"ORI": primer_ori,
"CHR": chroms["C0"],
"NAME": p_name,
"GENOMIC_START": start,
"GENOMIC_END": end,
"COORDINATES": ref_coord}
for c in p_copies:
if c != "C0":
                # check if both ends of the primer have aligned with the
                # reference
try:
para_start = p_coord["C0"][c][start]
para_end = p_coord["C0"][c][end]
except KeyError:
# do not add that copy if it is not aligned
continue
para_primer_ori = para_start < para_end
if para_primer_ori:
para_primer_key = (chroms[c] + ":" + str(para_start) + "-"
+ str(para_end))
p_dic["PARALOG_COORDINATES"][c] = {
"ORI": "forward", "CHR": chroms[c], "NAME": p_name,
"GENOMIC_START": para_start, "GENOMIC_END": para_end,
"COORDINATES": ref_coord, "KEY": para_primer_key}
primer_keys.add(para_primer_key)
else:
para_primer_key = chroms[c] + ":" + str(
para_end) + "-" + str(para_start)
p_dic["PARALOG_COORDINATES"][c] = {
"ORI": "reverse", "CHR": chroms[c], "NAME": p_name,
"GENOMIC_START": para_start, "GENOMIC_END": para_end,
"COORDINATES": ref_coord, "KEY": para_primer_key}
primer_keys.add(para_primer_key)
if len(primer_keys) > 0:
primer_sequences = get_fasta_list(primer_keys, species)
for p in primers:
para = primers[p]["PARALOG_COORDINATES"]
for c in para:
if c != "C0":
copy_dict = para[c]
p_ori = copy_dict["ORI"]
p_key = copy_dict["KEY"]
p_seq = primer_sequences[p_key]
if p_ori == "reverse":
p_seq = reverse_complement(p_seq)
copy_dict["SEQUENCE"] = primer_sequences[p_key]
if outp:
with open(os.path.join(primer3_output_DIR, outname), "w") as outf:
json.dump(primer_dict, outf, indent=1)
return primer_dict
def bowtie2_run(fasta_file, output_file, bowtie2_input_DIR,
bowtie2_output_DIR, species, process_num=4,
seed_MM=1, mode="-a", seed_len=18, gbar=1, local=0):
"""Align primers from a fasta file to specified species genome."""
file_locations = get_file_locations()
# check if entered species is supported
genome = file_locations[species]["bowtie2_genome"]
# determine what type of alignment is wanted
# local or end-to-end
if local:
check_local = "--local"
else:
check_local = "--end-to-end"
res = subprocess.Popen(["bowtie2", "-p", str(process_num), "-D", "20",
"-R", "3", "-N", str(seed_MM), "-L",
str(seed_len), "-i", "S,1,0.5", "--gbar",
str(gbar), mode, check_local, "-x", genome, "-f",
os.path.join(bowtie2_input_DIR, fasta_file), "-S",
os.path.join(bowtie2_output_DIR, output_file)],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
log_file = os.path.join(
bowtie2_output_DIR, "log_" + species + "_" + id_generator(6))
with open(log_file, "wb") as outfile:
outfile.write(res.communicate()[1])
return 0
def bowtie(fasta_file, output_file, bowtie2_input_DIR, bowtie2_output_DIR,
options, species, process_num=4, mode="-a", local=0, fastq=0):
"""Align a fasta or fastq file to a genome using bowtie2."""
file_locations = get_file_locations()
# check if entered species is supported
genome = file_locations[species]["bowtie2_genome"]
# determine what type of alignment is wanted
# local or end-to-end
if local:
check_local = "--local"
else:
check_local = "--end-to-end"
com = ["bowtie2", "-p " + str(process_num)]
com.extend(options)
com.append(mode)
com.append(check_local)
com.append("-x " + genome)
if fastq:
com.append("-q " + os.path.join(bowtie2_input_DIR, fasta_file))
else:
com.append("-f " + os.path.join(bowtie2_input_DIR, fasta_file))
com.append("-S " + os.path.join(bowtie2_output_DIR, output_file))
subprocess.check_output(com)
return 0
def bwa(fastq_file, output_file, output_type, input_dir,
output_dir, options, species, base_name="None"):
"""
Align a fastq file to species genome using bwa.
Options should be a list that starts with the command (e.g. mem, aln etc).
Additional options should be appended as strings of "option value",
for example, "-t 30" to use 30 threads. Output type can be sam or bam.
Recommended options ["-t30", "-L500", "-T100"]. Here L500 penalizes
clipping severely so the alignment becomes end-to-end and T100 stops
reporting secondary alignments, assuming their score is below 100.
"""
genome_file = get_file_locations()[species]["bwa_genome"]
read_group = ("@RG\\tID:" + base_name + "\\tSM:" + base_name + "\\tLB:"
+ base_name + "\\tPL:ILLUMINA")
options = copy.deepcopy(options)
options.append("-R" + read_group)
if output_type == "sam":
com = ["bwa"]
com.extend(options)
com.append(genome_file)
com.append(os.path.join(input_dir, fastq_file))
with open(os.path.join(output_dir, output_file), "w") as outfile:
subprocess.check_call(com, stdout=outfile)
else:
com = ["bwa"]
com.extend(options)
com.append(genome_file)
com.append(os.path.join(input_dir, fastq_file))
sam = subprocess.Popen(com, stdout=subprocess.PIPE)
bam_com = ["samtools", "view", "-b"]
bam = subprocess.Popen(bam_com, stdin=sam.stdout,
stdout=subprocess.PIPE)
bam_file = os.path.join(output_dir, output_file)
sort_com = ["samtools", "sort", "-T", "/tmp/", "-o", bam_file]
subprocess.run(sort_com, stdin=bam.stdout)
subprocess.run(["samtools", "index", bam_file], check=True,
stderr=subprocess.PIPE)
def bwa_multi(fastq_files, output_type, fastq_dir, bam_dir, options, species,
processor_number, parallel_processes):
"""Align fastq files to species genome using bwa in parallel."""
if len(fastq_files) == 0:
fastq_files = [f.name for f in os.scandir(fastq_dir)]
if output_type == "sam":
extension = ".sam"
elif output_type == "bam":
extension = ".srt.bam"
else:
print(("Output type must be bam or sam, {} was given").format(
output_type))
return
if not os.path.exists(bam_dir):
os.makedirs(bam_dir)
    if parallel_processes == 1:
        # add the threads option once; using extend() with a string here
        # would append each character as a separate option.
        options = options + ["-t" + str(processor_number)]
        for f in fastq_files:
            # get base file name
            base_name = f.split(".")[0]
            bam_name = base_name + extension
            bwa(f, bam_name, output_type, fastq_dir, bam_dir, options,
                species, base_name)
else:
processor_per_process = processor_number // parallel_processes
p = NoDaemonProcessPool(parallel_processes)
options = options + ["-t " + str(processor_per_process)]
results = []
errors = []
for f in fastq_files:
base_name = f.split(".")[0]
bam_name = base_name + extension
p.apply_async(bwa, (f, bam_name, output_type, fastq_dir, bam_dir,
options, species, base_name),
callback=results.append,
error_callback=errors.append)
p.close()
p.join()
if len(errors) > 0:
for e in errors:
print("Error in bwa_multi function", e.stderr)
def parse_cigar(cigar):
"""
Parse a CIGAR string.
CIGAR string is made up of numbers followed
by key letters that represent a sequence alignment; return a dictionary
with alignment keys and number of bases with that alignment key as values.
Below is some more information about cigar strings.
    "2S20M1I2M5D", for example, means that 2 bases are "S"oft clipped
    from the 5' end of the read and are not part of the alignment;
    the following 20 bases of the read align or "M"atch to the reference
    sequence. A match does not mean the bases are identical, just that
    there is one reference base for each read base and there is enough
    similarity between the two sequences that they aligned. The 1 base
    following the 20M is an "I"nsertion: it exists in the read but not
    in the reference. After 2 more matching bases, the 5 bases at the
    end are "D"eletions; they are in the reference but not in the read.
"""
cig = {}
values = []
for c in cigar:
try:
values.append(str(int(c)))
except ValueError:
if c in list(cig.keys()):
cig[c] += int("".join(values))
else:
cig[c] = int("".join(values))
values = []
return cig
def get_cigar_length(cigar):
"""Get the length of the reference sequence from CIGAR string."""
try:
# parse cigar string and find out how many insertions are in the
# alignment
insertions = parse_cigar(cigar)["I"]
except KeyError:
# the key "I" will not be present in the cigar string if there is no
# insertion
insertions = 0
# all the values in the cigar dictionary represent a base in the reference
# seq,
# except the insertions, so they should be subtracted
return sum(parse_cigar(cigar).values()) - insertions
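# Illustrative sketch: parse_cigar and get_cigar_length applied to the CIGAR
# string discussed in the parse_cigar docstring. The expected values follow
# directly from the implementations above.
def _cigar_example():
    parsed = parse_cigar("2S20M1I2M5D")
    # {"S": 2, "M": 22, "I": 1, "D": 5}; the two M runs are summed
    ref_span = get_cigar_length("2S20M1I2M5D")
    # 30 aligned units in total minus 1 insertion -> 29
    return parsed, ref_span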
def parse_bowtie(primer_dict, bt_file, primer_out, primer3_output_DIR,
bowtie2_output_DIR, species, settings, outp=1):
"""
Take a bowtie output (sam) file and filter top N hits per primer.
When a primer has more than "upper_hit_limit" bowtie hits,
remove that primer.
Add the bowtie hit information, including hit sequence to
the primers dictionary.
"""
# extract how many bowtie hits should be added
# to the primer information for further TM analysis
N = int(settings["hit_limit"])
    # how many total bowtie hits cause a primer to be removed
M = int(settings["upper_hit_limit"])
# read in bowtie file
infile = open(os.path.join(bowtie2_output_DIR, bt_file), 'r')
primers = copy.deepcopy(primer_dict)
# create a temp dic to count hits/primer
counter_dic = {}
# create a bowtie key that will be used when adding
# bowtie information to primers
bowtie_key = "bowtie_information_" + species
# all bowtie hits that will be used further for TM analysis
# will need to have sequence information with them
# region keys for hits (in chrx:begin-end format) will be
    # kept in a set for mass fasta extraction later.
keys = set()
#
# read bowtie hits
for line in infile:
try:
if not line.startswith("@"):
record = line.strip('\n').split('\t')
primer_name = record[0]
# increment hit counter for primer
try:
counter_dic[primer_name] += 1
except KeyError:
counter_dic[primer_name] = 1
# check how many hits have been analyzed for this primer
# if upper hit limit has been reached, mark primer for removal
if counter_dic[primer_name] >= M:
primers['primer_information'][primer_name]["remove"] = True
continue
# move on to the next hit if primer hit limit has been reached.
# no further hits will be added for those primers
if counter_dic[primer_name] >= N:
continue
flag = record[1]
# a flag value of 4 means there was no hit, so pass those lines
if flag == "4":
continue
# chromosome of the bowtie hit
chrom = record[2]
# genomic position of bowtie hit
pos = int(record[3])
# get cigar string of alignment
cigar = record[5]
# extract which strand is the bowtie hit on
# true if forward
strand = ((int(record[1]) % 256) == 0)
# get hit coordinates
hit_start = pos
# bowtie gives us the start position of the hit
# end position is calculated using the cigar string
# of the hit
hit_end = pos + get_cigar_length(cigar) - 1
# create region keys required for sequence retrieval
# we want 3 nt extra on the 5' of the primer
# because when alternative primers for paralogs
# are considered we check +/- 3 nt from 5' end
# to balance TM.
if strand:
# Primer's 5' is the hit start when the hit is on forward
# strand so the nucleotides are added at start position
bt_start = hit_start
bt_end = hit_end
hit_str = "forward"
hit_region_key = (chrom + ":" + str(hit_start)
+ "-" + str(hit_end))
else:
bt_start = hit_end
bt_end = hit_start
hit_str = "reverse"
hit_region_key = (chrom + ":" + str(hit_start)
+ "-" + str(hit_end))
# add region key to keys list for fasta retrieval later
keys.add(hit_region_key)
# add all hit information to primer dictionary
try:
primers["primer_information"][primer_name][bowtie_key][
str(counter_dic[primer_name])
] = {"chrom": chrom, "begin": bt_start, "end": bt_end,
"key": hit_region_key, "strand": hit_str}
except KeyError:
primers["primer_information"][primer_name][bowtie_key] = {
str(counter_dic[primer_name]): {"chrom": chrom,
"begin": bt_start,
"end": bt_end,
"key": hit_region_key,
"strand": hit_str}
}
except KeyError:
            # in earlier versions of this function, primers with excessive
            # hits were removed during iteration and that led to KeyErrors.
            # Now there should be no KeyError.
continue
# get the fasta sequences of all hits
sequence_dic = get_fasta_list(keys, species)
# remove primers with too many hits and add bowtie information for others.
for p in list(primers["primer_information"].keys()):
try:
if primers["primer_information"][p]["remove"]:
primers["primer_information"].pop(p)
continue
except KeyError:
pass
# add hit sequences to primer dictionary
# forward strand hits are added directly
# reverse strand hits are reversed-complemented
        # so the hit is always in the primer orientation
        # and similar in sequence
try:
for h in primers["primer_information"][p][bowtie_key]:
if (primers["primer_information"][p]
[bowtie_key][h]["strand"] == "forward"):
primers["primer_information"][p][bowtie_key][h][
"sequence"
] = sequence_dic[primers["primer_information"][p][
bowtie_key][h]["key"]
]
else:
primers["primer_information"][p][bowtie_key][h][
"sequence"
] = reverse_complement(
sequence_dic[primers["primer_information"]
[p][bowtie_key][h]["key"]]
)
except KeyError:
# if there is no bowtie hit for this primer (happens for host
# species):
primers["primer_information"][p][bowtie_key] = {}
# save the updated primers file
if outp:
with open(os.path.join(
primer3_output_DIR, primer_out), 'w') as outfile:
json.dump(primers, outfile, indent=1)
return primers
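# Illustrative sketch: the strand test used in parse_bowtie above. In SAM
# format the 0x10 (16) flag bit marks a reverse strand hit; the modulo test
# treats flags such as 0 or 256 (secondary alignment, forward) as forward.
def _sam_flag_example():
    forward = (0 % 256) == 0              # True -> forward strand
    reverse = (16 % 256) == 0             # False -> reverse strand
    secondary_forward = (256 % 256) == 0  # True -> forward strand
    return forward, reverse, secondary_forward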
def process_bowtie(primers, primer_out, primer3_output_DIR,
bowtie2_output_DIR, species, settings, host=False, outp=1):
"""
Process a primer dict with bowtie information added.
Look at bowtie hits for each primer, determine if they
    are on intended targets or nonspecific. In cases of paralogous
regions, check all paralogs and determine if the primer
will bind to any paralog. Create alternative primers if necessary
and allowed. Get melting temperatures of all hits and add
all these information to the primer dictionary.
"""
    # get Na, Mg and oligo concentrations; these are specified in M but
    # primer3 uses mM for ions and nM for oligos, so those will be adjusted.
Na = float(settings["Na"]) * 1000
Mg = float(settings["Mg"]) * 1000
conc = float(settings["oligo_conc"]) * pow(10, 9)
# are alternative mip arms allowed/desired
alt_arm = int(settings["alternative_arms"])
bowtie_key = "bowtie_information_" + species
alt_keys = set([])
# get reference chromosome lengths
genome_file = get_file_locations()[species]["fasta_genome"]
reference_lengths = {}
genome_sam = pysam.FastaFile(genome_file)
for r in genome_sam.references:
reference_lengths[r] = genome_sam.get_reference_length(r)
# read bowtie hits
for primer_name in primers['primer_information']:
try:
primer_seq = primers['primer_information'][primer_name]["SEQUENCE"]
if not host:
para = (primers['primer_information'][primer_name]
["PARALOG_COORDINATES"])
if ("BOWTIE_BINDS" not in
primers['primer_information'][primer_name]):
primers[
'primer_information'][primer_name]["BOWTIE_BINDS"] = []
if ("ALT_BINDS" not in
primers['primer_information'][primer_name]):
primers[
'primer_information'][primer_name]["ALT_BINDS"] = []
for bt_hit_name in list(primers['primer_information']
[primer_name][bowtie_key].keys()):
bt_hit = (primers['primer_information'][primer_name]
[bowtie_key][bt_hit_name])
bt_chrom = bt_hit["chrom"]
bt_begin = bt_hit["begin"]
bt_end = bt_hit["end"]
bt_ori = bt_hit["strand"]
bt_seq = bt_hit["sequence"]
if host:
bt_hit["TM"] = calcHeterodimerTm(
primer_seq,
reverse_complement(bt_seq),
mv_conc=Na,
dv_conc=Mg,
dntp_conc=0,
dna_conc=conc
)
continue
intended = 0
# para is a dict like:
# {C0:{"CHR": "chr4", "GENOMIC_START" ..}, C1:{..
# for non-CNV regions, bowtie mapping should be exactly the
# same as genomic coordinates, so even if there is 1 bp
# difference, we'll count this as off target. For CNV regions,
# a more generous 20 bp padding will be allowed to account for
# differences in our mapping and bowtie mapping. Bowtie mapping
# will be accepted as the accurate mapping and paralog
# coordinates will be changed accordingly.
map_padding = 1
if len(para) > 1:
map_padding = 20
for k in para:
para_ori = para[k]["ORI"]
para_chr = para[k]["CHR"]
para_begin = para[k]["GENOMIC_START"]
para_end = para[k]["GENOMIC_END"]
if ((para_ori == bt_ori) and (para_chr == bt_chrom)
and (abs(para_begin - bt_begin) < map_padding)
and (abs(para_end - bt_end) < map_padding)):
intended = 1
# Get bowtie determined coordinates and sequences
# for the paralog copy. These will have priority
# over GENOMIC_ values calculated internally.
para[k]["BOWTIE_END"] = bt_end
para[k]["BOWTIE_START"] = bt_begin
para[k]["BOWTIE_SEQUENCE"] = bt_seq
if intended:
# if the paralog sequence is the same as the reference
# this primer should bind to the paralog copy as well.
if bt_seq.upper() == primer_seq.upper():
para[k]["BOWTIE_BOUND"] = True
primers['primer_information'][
primer_name]["BOWTIE_BINDS"].append(k)
else:
# if the sequences are not exactly the same
# we'll assume the primer does not bind to the
# paralog and attempt to generate an alternative
# primer for this paralog.
para[k]["BOWTIE_BOUND"] = False
# Do this only if alternative MIP arms are allowed
# specified by alt_arm setting.
if alt_arm:
# get chromosome length to avoid setting
                                    # alt arms beyond chromosome ends
para_chr_length = reference_lengths[para_chr]
al = {}
al["ref"] = {"ALT_SEQUENCE": primer_seq}
al["ref"]["ALT_TM"] = calcHeterodimerTm(
primer_seq,
reverse_complement(primer_seq),
mv_conc=Na,
dv_conc=Mg,
dntp_conc=0,
dna_conc=conc
)
for j in range(-3, 4):
if j == 0:
continue
alt_start = bt_begin + j
alt_end = bt_end
if ((alt_start < 0) or (alt_end < 0)
or (alt_start > para_chr_length)
or (alt_end > para_chr_length)):
continue
if para_ori == "forward":
alt_primer_key = create_region(
bt_chrom,
alt_start,
alt_end
)
else:
alt_primer_key = create_region(
bt_chrom,
alt_end,
alt_start
)
al[j] = {}
al[j]["ALT_START"] = alt_start
al[j]["ALT_END"] = alt_end
al[j]["ALT_ORI"] = para_ori
al[j]["ALT_KEY"] = alt_primer_key
alt_keys.add(alt_primer_key)
para[k]["ALTERNATIVES"] = al
else:
para[k]["ALTERNATIVES"] = {}
para[k]["ALT_TM"] = 0
para[k]["ALT_TM_DIFF"] = 100
para[k]["ALT_BOUND"] = False
# remove bowtie hit for intended target
primers['primer_information'][
primer_name][bowtie_key].pop(bt_hit_name)
break
                # add TM value for unintended target
if not intended:
bt_hit["TM"] = calcHeterodimerTm(
primer_seq,
reverse_complement(bt_seq),
mv_conc=Na,
dv_conc=Mg,
dntp_conc=0,
dna_conc=conc
)
# Design alternative primers (if allowed) for paralogs
# when there is no bowtie hit for that paralog.
if not host:
for k in para:
try:
para[k]["BOWTIE_END"]
except KeyError:
para_ori = para[k]["ORI"]
para_chr = para[k]["CHR"]
para_begin = para[k]["GENOMIC_START"]
para_end = para[k]["GENOMIC_END"]
para[k]["BOWTIE_BOUND"] = False
if alt_arm:
# get chromosome length to avoid setting
                            # alt arms beyond chromosome ends
para_chr_length = reference_lengths[para_chr]
al = {}
al["ref"] = {"ALT_SEQUENCE": primer_seq}
al["ref"]["ALT_TM"] = calcHeterodimerTm(
primer_seq,
reverse_complement(primer_seq),
mv_conc=Na,
dv_conc=Mg,
dntp_conc=0,
dna_conc=conc
)
for j in range(-3, 4):
if j == 0:
continue
alt_start = para_begin + j
alt_end = para_end
if ((alt_start < 0) or (alt_end < 0)
or (alt_start > para_chr_length)
or (alt_end > para_chr_length)):
continue
if para_ori == "forward":
alt_primer_key = create_region(
para_chr,
alt_start,
alt_end
)
else:
alt_primer_key = create_region(
para_chr,
alt_end,
alt_start
)
al[j] = {}
al[j]["ALT_START"] = alt_start
al[j]["ALT_END"] = alt_end
al[j]["ALT_ORI"] = para_ori
al[j]["ALT_KEY"] = alt_primer_key
alt_keys.add(alt_primer_key)
para[k]["ALTERNATIVES"] = al
else:
para[k]["ALTERNATIVES"] = {}
para[k]["ALT_TM"] = 0
para[k]["ALT_TM_DIFF"] = 100
para[k]["ALT_BOUND"] = False
except KeyError:
continue
if len(alt_keys) > 0:
alt_sequences = get_fasta_list(alt_keys, species)
for primer_name in primers['primer_information']:
para = (primers['primer_information'][primer_name]
["PARALOG_COORDINATES"])
for k in para:
try:
alt_candidates = para[k]["ALTERNATIVES"]
except KeyError:
continue
for c in list(alt_candidates.keys()):
try:
alt_candidates[c]["ALT_TM"]
except KeyError:
alt_ori = alt_candidates[c]["ALT_ORI"]
alt_key = alt_candidates[c]["ALT_KEY"]
alt_seq = alt_sequences[alt_key]
if alt_ori == "reverse":
alt_seq = reverse_complement(alt_seq)
if alt_seq != "":
alt_tm = calcHeterodimerTm(
alt_seq,
reverse_complement(alt_seq),
mv_conc=Na,
dv_conc=Mg,
dntp_conc=0,
dna_conc=conc
)
alt_candidates[c]["ALT_TM"] = alt_tm
alt_candidates[c]["ALT_SEQUENCE"] = alt_seq
else:
alt_candidates.pop(c)
if outp:
with open(os.path.join(
primer3_output_DIR, primer_out), 'w') as outfile:
json.dump(primers, outfile, indent=1)
return primers
def filter_bowtie(primers, output_file, primer3_output_DIR, species, TM=46,
hit_threshold=0, lower_tm=46, lower_hit_threshold=3, outp=1):
"""
Check TMs of bowtie hits of given primers, on a given genome.
Filter the primers with too many nonspecific hits.
"""
for primer in list(primers["primer_information"].keys()):
# create a hit count parameter for hits with significant tm
# there are two parameters specified in the rinfo file
# high temp limit and low temp limit. The idea is to allow
# a very small (if any) number of nonspecific targets with high TM
# values but allow some low TM off targets.
hc = 0
lhc = 0
# check if bowtie information exists in dic
try:
bt_key = "bowtie_information_" + species
bowtie = primers["primer_information"][primer][bt_key]
for h in bowtie:
hit = bowtie[h]
try:
# if TM information is included in bowtie, compare with
                    # high and low TM, increment hc, lhc if necessary, and
                    # discard primers passing specified off-target thresholds.
if float(hit["TM"]) >= TM:
hc += 1
if hc > hit_threshold:
primers["primer_information"].pop(primer)
break
elif float(hit["TM"]) >= lower_tm:
lhc += 1
if lhc > lower_hit_threshold:
primers["primer_information"].pop(primer)
break
except KeyError:
continue
# remove bowtie information once we use it.
primers["primer_information"][primer].pop(bt_key)
except KeyError:
continue
if outp:
# write dictionary to file in primer3_output_DIR
outfile = open(os.path.join(primer3_output_DIR, output_file), 'w')
json.dump(primers, outfile, indent=1)
outfile.close()
return primers
def alternative(primer_dic, output_file,
primer3_output_DIR, tm_diff, outp=1):
"""
Pick the best alternative arm for primers that do not bind all paralogs.
This is done by picking the alternative primer with melting temperature
that is closest to the original primer.
"""
primers = primer_dic["primer_information"]
try:
for primer_name in primers:
primer = primers[primer_name]
para = primer["PARALOG_COORDINATES"]
for c in para:
try:
alts = para[c]["ALTERNATIVES"]
# get the original primer TM
ref_tm = alts["ref"].pop("ALT_TM")
alts.pop("ref")
# sort alt primers by their TM difference from the ref
sorted_alts = sorted(
alts, key=lambda a: abs(alts[a]["ALT_TM"] - ref_tm)
)
# use the primer only if the TM difference is within
# specified limit.
if abs(alts[sorted_alts[0]]["ALT_TM"] - ref_tm) <= tm_diff:
primer["ALT_BINDS"].append(c)
para[c].update(alts[sorted_alts[0]])
para[c].pop("ALTERNATIVES")
except KeyError:
try:
para[c].pop("ALTERNATIVES")
except KeyError:
pass
except IndexError:
try:
para[c].pop("ALTERNATIVES")
except KeyError:
pass
except KeyError:
pass
if outp:
with open(os.path.join(
primer3_output_DIR, output_file), "w") as outfile:
json.dump(primer_dic, outfile, indent=1)
return primer_dic
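# Illustrative sketch: the closest-TM selection used in alternative() above.
# TM values are made up; the alternative arm whose TM is nearest to the
# reference arm TM is picked, provided the difference is within tm_diff.
def _closest_tm_example(tm_diff=3):
    ref_tm = 60.0
    alts = {-1: {"ALT_TM": 58.5}, 1: {"ALT_TM": 61.2}, 2: {"ALT_TM": 55.0}}
    best = sorted(alts, key=lambda a: abs(alts[a]["ALT_TM"] - ref_tm))[0]
    # best == 1 because |61.2 - 60.0| is the smallest difference
    if abs(alts[best]["ALT_TM"] - ref_tm) <= tm_diff:
        return best
    return None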
def score_paralog_primers(primer_dict, output_file, primer3_output_DIR,
ext, mask_penalty, species, backbone, outp=1):
"""
Score primers in a dictionary according to a scoring matrix.
Scoring matrices are somewhat crude at this time.
    Arm GC content weighs the most, then arm GC clamp and arm length;
    next_base values weigh the least.
"""
primers = primer_dict["primer_information"]
extension = (ext == "extension")
# primer scoring coefficients were calculated based on
# linear models of various parameters and provided as a dict
with open("/opt/resources/mip_scores.dict", "rb") as infile:
linear_coefs = pickle.load(infile)
# the model was developed using specific reaction conditions as below.
# actual conditions may be different from these but we'll use these
# for the model.
na = 25 # Sodium concentration
mg = 10 # magnesium concentration
conc = 0.04 # oligo concentration
# get extension arm sequence
if extension:
for p in primers:
extension_arm = primers[p]["SEQUENCE"]
# calculate gc content of extension arm
extension_gc = calculate_gc(extension_arm)
# count lowercase masked nucleotides. These would likely be masked
# for variation underneath.
extension_lowercase = sum([c.islower() for c in extension_arm])
# calculate TM with the model parameters for TM
ext_TM = primer3.calcTm(extension_arm, mv_conc=na, dv_conc=mg,
dna_conc=conc, dntp_conc=0)
# create a mip parameter dict
score_features = {"extension_gc": extension_gc,
"extension_lowercase": extension_lowercase,
"ext_TM": ext_TM}
# calculate primer score using the linear model provided
tech_score = 0
for feature in score_features:
degree = linear_coefs[feature]["degree"]
primer_feature = score_features[feature]
poly_feat = [pow(primer_feature, i) for i in range(degree + 1)]
tech_score += sum(linear_coefs[feature]["coef"] * poly_feat)
tech_score += linear_coefs[feature]["intercept"]
primers[p]["SCORE"] = tech_score
# get ligation arm parameters
else:
for p in primers:
ligation_arm = primers[p]["SEQUENCE"]
            # calculate gc content of ligation arm
ligation_gc = calculate_gc(ligation_arm)
# only the 3' end of the ligation arm was important in terms of
# lowercase masking.
ligation_lowercase_end = sum([c.islower()
for c in ligation_arm[-5:]])
# calculate TM of ligation sequence (actual ligation probe arm)
            # against the probe backbone.
ligation_bb_TM = primer3.calcHeterodimerTm(
reverse_complement(ligation_arm), backbone,
mv_conc=na, dv_conc=mg, dna_conc=conc, dntp_conc=0)
# create a mip parameter dict
score_features = {"ligation_gc": ligation_gc,
"ligation_lowercase_end": ligation_lowercase_end,
"ligation_bb_TM": ligation_bb_TM}
# calculate primer score using the linear model provided
tech_score = 0
for feature in score_features:
degree = linear_coefs[feature]["degree"]
primer_feature = score_features[feature]
poly_feat = [pow(primer_feature, i) for i in range(degree + 1)]
tech_score += sum(linear_coefs[feature]["coef"] * poly_feat)
tech_score += linear_coefs[feature]["intercept"]
primers[p]["SCORE"] = tech_score
if outp:
# write dictionary to json file
outfile = open(os.path.join(primer3_output_DIR, output_file), "w")
json.dump(primer_dict, outfile, indent=1)
outfile.close()
return primer_dict
def filter_primers(primer_dict, output_file,
primer3_output_DIR, n, bin_size, outp=1):
"""
Filter primers so that only top n scoring primers remain for each bin.
Primers are divided into bins of the given size based on the 3' end of
    the primer. Only the top n scoring primers ending in the same bin will
    remain after filtering.
    For example, bin_size=3 and n=1 would choose the best scoring primer
among primers that end within 3 bps of each other.
"""
    # get the template sequence to determine the bin layout
template_seq = primer_dict["sequence_information"]["SEQUENCE_TEMPLATE"]
template_len = len(template_seq)
forward_bins = {}
reverse_bins = {}
for i in range(template_len//bin_size + 1):
forward_bins[i] = []
reverse_bins[i] = []
for primer in list(primer_dict["primer_information"].keys()):
# get primer orientation
ori = primer_dict["primer_information"][primer]["ORI"]
# get primer start coordinate
start = int(primer_dict["primer_information"][primer]
["COORDINATES"].split(",")[0])
primer_len = int(primer_dict["primer_information"][primer]
["COORDINATES"].split(",")[1])
if ori == "forward":
end = start + primer_len - 1
elif ori == "reverse":
end = start - primer_len + 1
        # which bin the 3' end coordinate falls into
end_bin = end//bin_size
# get primer score
score = primer_dict["primer_information"][primer]["SCORE"]
# append the primer name/score to appropriate bin dic
if ori == "forward":
forward_bins[end_bin].append([primer, score])
elif ori == "reverse":
reverse_bins[end_bin].append([primer, score])
best_primer_dict = {}
best_primer_dict["sequence_information"] = primer_dict[
"sequence_information"]
best_primer_dict["primer_information"] = {}
# find best scoring mips in each forward bin
for key in forward_bins:
# sort primers for score
primer_set = sorted(forward_bins[key], key=itemgetter(1))
# get best scoring primers (all primers if there are less than n)
if len(primer_set) < n:
best_primers = primer_set
else:
best_primers = primer_set[-n:]
        # add best primers to dictionary
for primers in best_primers:
primer_name = primers[0]
best_primer_dict["primer_information"][primer_name] = primer_dict[
"primer_information"][primer_name]
# find best scoring mips in each reverse bin
for key in reverse_bins:
# sort primers for score
primer_set = sorted(reverse_bins[key], key=itemgetter(1))
# get best scoring primers (all primers if there are less than n)
if len(primer_set) < n:
best_primers = primer_set
else:
best_primers = primer_set[-n:]
        # add best primers to dictionary
for primers in best_primers:
primer_name = primers[0]
best_primer_dict["primer_information"][primer_name] = primer_dict[
"primer_information"][primer_name]
# write new dic to file
if outp:
with open(os.path.join(
primer3_output_DIR, output_file), "w") as outfile:
json.dump(best_primer_dict, outfile, indent=1)
return best_primer_dict
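# Illustrative sketch: the 3' end binning used by filter_primers with
# bin_size=3 and n=1, as in the docstring example. Primer names, end
# coordinates and scores are made up.
def _primer_binning_example():
    bin_size, n = 3, 1
    candidates = [("PRIMER_LEFT_0", 100, 0.8),
                  ("PRIMER_LEFT_1", 101, 0.9),
                  ("PRIMER_LEFT_2", 105, 0.7)]
    bins = {}
    for name, end, score in candidates:
        bins.setdefault(end // bin_size, []).append((score, name))
    # ends 100 and 101 share bin 33, so only the higher scoring primer
    # survives there; 105 falls in bin 35 and is kept by itself.
    return {b: sorted(hits)[-n:] for b, hits in bins.items()}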
def pick_paralog_primer_pairs(extension, ligation, output_file,
primer3_output_DIR, min_size, max_size,
alternative_arms, region_insertions,
subregion_name, outp=1):
"""Pick primer pairs satisfying a given size range."""
# assign primer information dictionaries to a shorter name
ext = extension["primer_information"]
lig = ligation["primer_information"]
# check if extension and ligation dictionaries have primers
if len(ext) == 0:
return 1
if len(lig) == 0:
return 1
# create a primer pairs dic. This dictionary is similar to primer dic
primer_pairs = {}
# has the same sequence_information key:value pairs
primer_pairs["sequence_information"] = {}
# has pair information key instead of primer_information
primer_pairs["pair_information"] = {}
# populate sequence information (same as extension or ligation)
primer_pairs["sequence_information"]['SEQUENCE_TEMPLATE'] = extension[
"sequence_information"]['SEQUENCE_TEMPLATE']
primer_pairs["sequence_information"]['SEQUENCE_EXCLUDED_REGION'] = (
extension["sequence_information"]['SEQUENCE_EXCLUDED_REGION']
)
primer_pairs["sequence_information"]['SEQUENCE_TARGET'] = extension[
"sequence_information"]['SEQUENCE_TARGET']
primer_pairs["sequence_information"]['SEQUENCE_ID'] = extension[
"sequence_information"]['SEQUENCE_ID']
# pick primer pairs
for e in ext.keys():
# extension primer information for this mip will be e_info
e_info = ext[e]
# get primer coordinates
ext_start = e_info["GENOMIC_START"]
ext_end = e_info["GENOMIC_END"]
# get primer orientation
ext_ori = ext_end > ext_start
# if end is greater than start then it is a left(fw) primer,
# and ext_ori is True.
# get coordinates of this primer in paralog copies.
ep_info = e_info["PARALOG_COORDINATES"]
# the paralogs bound by primer according to bowtie mapping
e_binds = e_info["BOWTIE_BINDS"]
        # paralogs that were not bound by the primer, for which alt
        # primers were designed.
e_alt_binds = e_info["ALT_BINDS"]
# find a ligation primer
for l in list(lig.keys()):
l_info = lig[l]
# get primer coordinates
lig_start = l_info["GENOMIC_START"]
lig_end = l_info["GENOMIC_END"]
# get orientation of primer
lig_ori = lig_end < lig_start
# if end is less than start, it is a right primer
# create a list for start and end coordinates
coord = []
# continue only if the two orientations have the same value
if lig_ori == ext_ori:
# check if relative positions of primers are correct
if ext_ori:
# ligation end should be greater than extension end
# for forward pairs
position = lig_end > ext_end
else:
# extension end should be greater than ligation end
# for reverse pairs
position = ext_end > lig_end
# get pair information if relative positions of primers are
# correct
if position:
coord = [ext_start, ext_end, lig_start, lig_end]
coord.sort()
prod_size = coord[-1] - coord[0] + 1
pairs = {}
                    # get paralogous coordinates
lp_info = l_info["PARALOG_COORDINATES"]
l_binds = l_info["BOWTIE_BINDS"]
l_alt_binds = l_info["ALT_BINDS"]
# find the paralogs that are hybridized by both primers
# start with paralog copies that are bound by the
# original primers (not alts).
paralogs = list(set(l_binds).intersection(e_binds))
for p in paralogs:
try:
p_coord = []
ep_start = ep_info[p]["BOWTIE_START"]
ep_end = ep_info[p]["BOWTIE_END"]
ep_ori = ep_end > ep_start
lp_start = lp_info[p]["BOWTIE_START"]
lp_end = lp_info[p]["BOWTIE_END"]
lp_ori = lp_end < lp_start
lp_chrom = lp_info[p]["CHR"]
if lp_ori == ep_ori:
if lp_ori:
p_position = lp_end > ep_end
pair_ori = "forward"
else:
p_position = lp_end < ep_end
pair_ori = "reverse"
if p_position:
p_coord = [ep_start, ep_end,
lp_start, lp_end]
p_coord.sort()
prod_size = p_coord[-1] - p_coord[0] + 1
pairs[p] = {
"capture_size": prod_size,
"extension_start": ep_start,
"extension_end": ep_end,
"ligation_start": lp_start,
"ligation_end": lp_end,
"mip_start": p_coord[0],
"mip_end": p_coord[3],
"capture_start": p_coord[1] + 1,
"capture_end": p_coord[2] - 1,
"chrom": lp_chrom,
"orientation": pair_ori
}
except KeyError:
continue
# check if any pairs' product is within size limits
# taking into account reported insertions within
# the target region. If there are insertions, we reduce
                    # the max size to accommodate those insertions.
# Deletions are handled differently because their impact
# on the captures will be different. Any deletion that
# is small enough to be captured will still be captured
                    # without any alterations. However, the capture size will
# become smaller, which is not detrimental.
pair_found = 0
captured_copies = []
for p in list(pairs.keys()):
if not region_insertions.empty:
max_insertion_size = region_insertions.loc[
(region_insertions["copy_chrom"]
== pairs[p]["chrom"])
& (region_insertions["copy_begin"]
> pairs[p]["capture_start"])
& (region_insertions["copy_end"]
< pairs[p]["capture_end"]),
"max_size"].sum()
else:
max_insertion_size = 0
adjusted_max_size = max_size - max_insertion_size
if adjusted_max_size < (min_size/2):
continue
                        # we do not have to adjust min_size unless the max
                        # size gets too close to min_size, in which case
                        # we leave a 30 bp distance between min and max so
# that we're not very limited in primer pair choices.
adjusted_min_size = min(adjusted_max_size - 30,
min_size)
if (adjusted_max_size
>= pairs[p]["capture_size"]
>= adjusted_min_size):
captured_copies.append(p)
pair_found = 1
if pair_found:
# if a pair is found for any copy
# remove minimum size restriction for other copies
for p in list(pairs.keys()):
if p in captured_copies:
continue
if not region_insertions.empty:
max_insertion_size = region_insertions.loc[
(region_insertions["copy_chrom"]
== pairs[p]["chrom"])
& (region_insertions["copy_begin"]
> pairs[p]["capture_start"])
& (region_insertions["copy_end"]
< pairs[p]["capture_end"]),
"max_size"].sum()
else:
max_insertion_size = 0
adjusted_max_size = max_size - max_insertion_size
if adjusted_max_size < (min_size/2):
continue
if (adjusted_max_size
>= pairs[p]["capture_size"] >= 0):
captured_copies.append(p)
# C0 must be in the captured copies because the
# reference copy is used for picking mip sets
if "C0" not in captured_copies:
continue
# create a pair name as
# PAIR_extension primer number_ligation primer number
ext_name = e.split('_')[2]
lig_name = l.split('_')[2]
pair_name = ("PAIR_" + subregion_name + "_" + ext_name
+ "_" + lig_name)
if ext_ori:
orientation = "forward"
pair_name = pair_name + "_F"
else:
orientation = "reverse"
pair_name = pair_name + "_R"
primer_pairs["pair_information"][pair_name] = {
"pairs": pairs,
"extension_primer_information": ext[e],
"ligation_primer_information": lig[l],
"orientation": orientation,
"captured_copies": captured_copies
}
# Check if there are any paralog copies that require
# alt primers to be used. If so, create those pairs.
alt_paralogs = list((set(l_alt_binds).union(
e_alt_binds)).difference(paralogs))
alts = {}
for a in alt_paralogs:
try:
alt_arms = []
p_coord = []
# check if the extension primer is the
# original or alt.
if ep_info[a]["BOWTIE_BOUND"]:
ep_start = ep_info[a]["BOWTIE_START"]
ep_end = ep_info[a]["BOWTIE_END"]
else:
try:
ep_start = ep_info[a]["ALT_START"]
ep_end = ep_info[a]["ALT_END"]
alt_arms.append("extension")
except KeyError:
continue
ep_ori = ep_end > ep_start
# check if ligation primer is the original
# or alternative designed.
if lp_info[a]["BOWTIE_BOUND"]:
lp_start = lp_info[a]["BOWTIE_START"]
lp_end = lp_info[a]["BOWTIE_END"]
else:
try:
lp_start = lp_info[a]["ALT_START"]
lp_end = lp_info[a]["ALT_END"]
alt_arms.append("ligation")
except KeyError:
continue
lp_ori = lp_end < lp_start
lp_chrom = lp_info[a]["CHR"]
if lp_ori == ep_ori:
if lp_ori:
p_position = lp_end > ep_end
pair_ori = "forward"
else:
p_position = lp_end < ep_end
pair_ori = "reverse"
if p_position:
p_coord = [ep_start, ep_end,
lp_start, lp_end]
p_coord.sort()
prod_size = (p_coord[-1]
- p_coord[0] + 1)
alts[a] = {
"capture_size": prod_size,
"extension_start": ep_start,
"extension_end": ep_end,
"ligation_start": lp_start,
"ligation_end": lp_end,
"mip_start": p_coord[0],
"mip_end": p_coord[3],
"capture_start": p_coord[1] + 1,
"capture_end": p_coord[2] - 1,
"chrom": lp_chrom,
"orientation": pair_ori,
"alternative_arms": alt_arms
}
except KeyError:
# if extension or ligation primer coordinates
# are not available for the paralog copy
# for any reason, e.g. the copy does not align
# to the ref for this primer, there will be
# a key error and it should be caught in this
# block.
continue
# check if any pairs' product is within size limits
captured_copies = []
for a in list(alts.keys()):
# does it satisfy arm setting?
good_alt = 0
# "any" means both ligation and extension arms
# are allowed to have alt sequences.
if alternative_arms == "any":
good_alt = 1
# if only one arm is allowed to have alt sequence,
# it could be specified as "one" or the specific
# arm (extension or ligation).
elif ((len(alts[a]["alternative_arms"]) == 1)
and ((alternative_arms
== alts[a]["alternative_arms"][0])
or (alternative_arms == "one"))):
good_alt = 1
# if the alt capture is valid, check the capture
                        # size and determine if it is likely to be
# captured.
if good_alt:
if not region_insertions.empty:
max_insertion_size = region_insertions.loc[
(region_insertions["copy_chrom"]
== alts[a]["chrom"])
& (region_insertions["copy_begin"]
> alts[a]["capture_start"])
& (region_insertions["copy_end"]
< alts[a]["capture_end"]),
"max_size"].sum()
else:
max_insertion_size = 0
adjusted_max_size = (max_size
- max_insertion_size)
if adjusted_max_size < (min_size/2):
continue
if (adjusted_max_size
>= alts[a]["capture_size"] >= 0):
captured_copies.append(a)
primer_pairs["pair_information"][
pair_name]["pairs"][a] = alts[a]
primer_pairs["pair_information"][pair_name][
"alt_copies"] = captured_copies
# return if no pairs found
if len(primer_pairs["pair_information"]) == 0:
# No primer pairs found.
return 1
# write dict to file in primer_output_DIR
if outp:
with open(os.path.join(
primer3_output_DIR, output_file), 'w') as outfile:
json.dump(primer_pairs, outfile, indent=1)
return primer_pairs
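# Illustrative sketch: the insertion-adjusted capture size check used when
# pairing primers above. All numbers are made up; a 30 bp insertion reported
# within the capture shrinks the effective maximum capture size.
def _capture_size_example():
    min_size, max_size = 120, 250
    max_insertion_size = 30
    adjusted_max_size = max_size - max_insertion_size          # 220
    adjusted_min_size = min(adjusted_max_size - 30, min_size)  # 120
    capture_size = 200
    return adjusted_max_size >= capture_size >= adjusted_min_size  # True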
def add_capture_sequence(primer_pairs, output_file, primer3_output_DIR,
species, outp=1):
"""
Extract the sequence between primers.
Get captured sequence using the primer coordinates.
"""
capture_keys = set()
for p_pair in primer_pairs["pair_information"]:
pairs = primer_pairs["pair_information"][p_pair]["pairs"]
for p in pairs:
paralog_key = pairs[p]["chrom"] + ":" + str(pairs[p][
"capture_start"]) + "-" + str(pairs[p]["capture_end"])
pairs[p]["capture_key"] = paralog_key
capture_keys.add(paralog_key)
capture_sequence_dic = get_fasta_list(capture_keys, species)
for p_pair in primer_pairs["pair_information"]:
pairs = primer_pairs["pair_information"][p_pair]["pairs"]
for p in pairs:
if pairs[p]["orientation"] == "forward":
pairs[p]["capture_sequence"] = capture_sequence_dic[pairs[p][
"capture_key"]]
else:
pairs[p]["capture_sequence"] = reverse_complement(
capture_sequence_dic[pairs[p]["capture_key"]]
)
if outp:
with open(os.path.join(
primer3_output_DIR, output_file), "w") as outfile:
json.dump(primer_pairs, outfile, indent=1)
return primer_pairs
def make_mips(pairs, output_file, primer3_output_DIR, mfold_input_DIR,
backbone, outp=1):
"""
Make mips from primer pairs.
Take the reverse complement of ligation primer sequence, add the backbone
sequence and the extension primer. Standard backbone is used if none
specified.
Add a new key to each primer pair:
"mip_information" with a dictionary that has SEQUENCE key
and mip sequence as value.
"""
# check if the primer dictionary is empty
if len(pairs["pair_information"]) == 0:
return 1
# get primer sequences for each primer pair
for primers in pairs["pair_information"]:
extension_sequence = pairs["pair_information"][primers][
"extension_primer_information"]["SEQUENCE"]
ligation_sequence = pairs["pair_information"][primers][
"ligation_primer_information"]["SEQUENCE"]
# reverse complement ligation primer
ligation_rc = reverse_complement(ligation_sequence)
# add sequences to make the mip
mip_sequence = ligation_rc + backbone + extension_sequence
# create a dictionary to hold mip information
mip_dic = {"ref": {"SEQUENCE": mip_sequence,
"captures": copy.deepcopy(
pairs["pair_information"][primers]
["captured_copies"]
)}}
# create alternative mips where necessary
if "alt_copies" in list(pairs["pair_information"][primers].keys()):
alt_sequences = {}
alt_counter = 0
alt = pairs["pair_information"][primers]["alt_copies"]
p_para = pairs["pair_information"][primers]["pairs"]
e_para = pairs["pair_information"][primers][
"extension_primer_information"]["PARALOG_COORDINATES"]
l_para = pairs["pair_information"][primers][
"ligation_primer_information"]["PARALOG_COORDINATES"]
# since alt primers are created for each copy, it is possible
# that some copies have the same primer pair. Pick just one
# such pair and remove the others.
for a in alt:
if "extension" in p_para[a]["alternative_arms"]:
extension_sequence = e_para[a]["ALT_SEQUENCE"].upper()
if "ligation" in p_para[a]["alternative_arms"]:
ligation_sequence = l_para[a]["ALT_SEQUENCE"].upper()
value_found = 0
# search through already created alt pairs to see if this one
# is already there.
for key, value in list(alt_sequences.items()):
if ([extension_sequence, ligation_sequence]
== value["sequences"]):
value_found = 1
# add the copy name to the dict and not create
# a new key for this copy.
value["copies"].append(a)
break
# create new entry if this alt pair is new
if not value_found:
alt_sequences[alt_counter] = {
"sequences": [extension_sequence, ligation_sequence],
"copies": [a]
}
alt_counter += 1
# create mip sequence and dict for the alt pairs
for alt_pair in alt_sequences:
seq_dic = alt_sequences[alt_pair]["sequences"]
alt_copies = alt_sequences[alt_pair]["copies"]
# reverse complement ligation primer
ligation_rc = reverse_complement(seq_dic[1])
# add sequences to make the mip
mip = ligation_rc + backbone + seq_dic[0]
mip_dic["alt" + str(alt_pair)] = {"SEQUENCE": mip,
"captures": alt_copies}
pairs["pair_information"][primers]["mip_information"] = mip_dic
# write mip sequences to a fasta file in mfold_input_DIR
# to check hairpin formation
with open(os.path.join(mfold_input_DIR, output_file), "w") as outfile:
for primers in pairs["pair_information"]:
outline = (">" + primers + "\n" + pairs["pair_information"]
[primers]["mip_information"]["ref"]['SEQUENCE'] + "\n")
outfile.write(outline)
# write mip dictionary to file in primer3_output_DIR
if outp:
outfile = open(os.path.join(primer3_output_DIR, output_file), 'w')
json.dump(pairs, outfile, indent=1)
outfile.close()
return pairs
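# Illustrative sketch: how a MIP sequence is assembled in make_mips above.
# The arm and backbone sequences are made up and much shorter than real ones.
def _mip_assembly_example():
    extension_arm = "ACGTACGTAC"
    ligation_arm = "TTGGCCAATT"
    backbone = "NNNGATCGATCGATCGNNN"
    # the ligation arm is reverse complemented, then the backbone and the
    # extension arm are appended
    return reverse_complement(ligation_arm) + backbone + extension_arm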
def check_hairpin(pairs, output_file, settings, output_dir, outp=1):
"""Check possible hairpin formation in MIP probe.
    Calculate possible hybridization between the MIP arms or between the MIP
arms and the probe backbone. Remove MIPs with likely hairpins.
"""
pairs = copy.deepcopy(pairs)
    # get Na, Mg and oligo concentrations; these are specified in M but
    # primer3 uses mM for ions and nM for oligos, so those will be adjusted.
Na = float(settings["mip"]["Na"]) * 1000
Mg = float(settings["mip"]["Mg"]) * 1000
conc = float(settings["mip"]["oligo_conc"]) * pow(10, 9)
    # number of mips will be used to determine the backbone concentration
mip_count = int(settings["mip"]["mipset_size"])
# get TM thresholds for hairpins, arm tms should be the same
# otherwise we'll use the lower of the two
ext_arm_tm = float(settings["extension"]["hairpin_tm"])
lig_arm_tm = float(settings["ligation"]["hairpin_tm"])
arm_tm = min([ext_arm_tm, lig_arm_tm])
# backbone tm will be used for interactions between arms and
# all the backbones (from other mips as well). This will cause a higher
    # tm since the backbones will be at a higher concentration, so it could
    # make sense to keep this threshold high. On the other hand, eliminating
    # even low likelihood interactions could be useful.
backbone_tm = float(settings["mip"]["hairpin_tm"])
backbone_name = settings["mip"]["backbone"]
backbone = mip_backbones[backbone_name]
# go through mips and calculate hairpins
# we will calculate hairpins by looking at TMs between arm sequences
# and backbone sequences since the whole MIP sequence is too long
# for nearest neighbor calculations (at least for primer3 implementation).
for p in list(pairs["pair_information"].keys()):
pair_dict = pairs["pair_information"][p]
mip_dict = pair_dict["mip_information"]
# for each primer pair we can have a number of mips due to paralog
# copies having alternative mips. We'll go through each mip.
for m in list(mip_dict.keys()):
mip_seq = mip_dict[m]["SEQUENCE"]
# extract arm and backbone sequences from the mip sequence
lig = mip_seq[:mip_seq.index(backbone)]
ext = mip_seq[mip_seq.index(backbone) + len(backbone):]
bb = backbone.replace("N", "")
# calculate dimer TMs between sequence combinations
ext_lig = calcHeterodimerTm(ext, lig, mv_conc=Na, dv_conc=Mg,
dntp_conc=0, dna_conc=conc)
bb_ext_arm = calcHeterodimerTm(ext, bb, mv_conc=Na, dv_conc=Mg,
dntp_conc=0, dna_conc=conc)
bb_lig_arm = calcHeterodimerTm(lig, bb, mv_conc=Na, dv_conc=Mg,
dntp_conc=0, dna_conc=conc)
# take the maximum TM for hairpin threshold comparison
arms = max([ext_lig, bb_ext_arm, bb_lig_arm])
# calculate TM between arms and the whole reaction backbones
            # backbone concentration will be higher for this calculation.
bb_ext = calcHeterodimerTm(ext, bb, mv_conc=Na, dv_conc=Mg,
dntp_conc=0, dna_conc=conc * mip_count)
bb_lig = calcHeterodimerTm(lig, bb, mv_conc=Na, dv_conc=Mg,
dntp_conc=0, dna_conc=conc * mip_count)
bb_temp = max([bb_ext, bb_lig])
# if either hairpin tms is higher than the limit, remove the mip
# and remove the paralog copy that is supposed to be captured
# by this specific mip from the pair dictionary.
if (arms > arm_tm) or (bb_temp > backbone_tm):
lost_captures = mip_dict[m]["captures"]
mip_copies = pair_dict["captured_copies"]
mip_copies = list(set(mip_copies).difference(lost_captures))
pair_dict["captured_copies"] = mip_copies
alt_copies = pair_dict["alt_copies"]
alt_copies = list(set(alt_copies).difference(lost_captures))
pair_dict["alt_copies"] = alt_copies
mip_dict.pop(m)
else:
mip_dict[m]["Melting Temps"] = {"arms_hp": ext_lig,
"ext_hp": bb_ext_arm,
"lig_hp": bb_lig_arm,
"ext_backbone": bb_ext,
"lig_backbone": bb_lig}
if len(mip_dict) == 0:
pairs["pair_information"].pop(p)
for p in pairs["pair_information"].keys():
pair_dict = pairs["pair_information"][p]
hp_dict = pair_dict["hairpin"] = {}
mip_dict = pair_dict["mip_information"]
for m in mip_dict:
hp_dict[m] = mip_dict[m]["Melting Temps"]
if outp:
output_file = os.path.join(output_dir, output_file)
with open(output_file, "w") as outfile:
json.dump(pairs, outfile)
return pairs
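# Illustrative sketch: the arm-versus-backbone heterodimer TM calculation
# used in check_hairpin above. Sequences and concentrations are made up;
# primer3 expects mM for ion concentrations and nM for the oligo.
def _hairpin_tm_example():
    ext_arm = "ACGTACGTACGTACGTAC"
    backbone = "GATCGATCGATCGATCGATC"
    return calcHeterodimerTm(ext_arm, backbone, mv_conc=25, dv_conc=10,
                             dntp_conc=0, dna_conc=40)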
def filter_mips(mip_dic, bin_size, mip_limit):
"""
Filter MIPs covering similar regions.
    Filter MIPs so that only the top scoring MIP remains among MIPs whose
    captures start and end within "bin_size" nucleotides of each other on
    the same strand.
"""
    # shuffle MIP names so comparisons are made in random order
shuffled = list(mip_dic.keys())
random.shuffle(shuffled)
for m in shuffled:
if len(mip_dic) <= mip_limit:
return
try:
m_start = mip_dic[m].mip["C0"]["capture_start"]
m_end = mip_dic[m].mip["C0"]["capture_end"]
m_func = mip_dic[m].func_score
m_tech = mip_dic[m].tech_score
m_ori = mip_dic[m].mip["C0"]["orientation"]
for n in shuffled:
if len(mip_dic) <= mip_limit:
return
try:
if mip_dic[m].name != mip_dic[n].name:
n_start = mip_dic[n].mip["C0"]["capture_start"]
n_end = mip_dic[n].mip["C0"]["capture_end"]
n_func = mip_dic[n].func_score
n_tech = mip_dic[n].tech_score
n_ori = mip_dic[n].mip["C0"]["orientation"]
if (((abs(n_start - m_start) <= bin_size)
and (abs(n_end - m_end) <= bin_size))
and (m_ori == n_ori)):
if (m_tech + m_func) >= (n_tech + n_func):
mip_dic.pop(n)
else:
mip_dic.pop(m)
break
except KeyError:
continue
except KeyError:
continue
return
def compatible_mip_check(m1, m2, overlap_same, overlap_opposite):
d = m1.mip_dic
# get m1 coordinates
ext_start = d["extension_primer_information"]["GENOMIC_START"]
ext_end = d["extension_primer_information"]["GENOMIC_END"]
lig_start = d["ligation_primer_information"]["GENOMIC_START"]
lig_end = d["ligation_primer_information"]["GENOMIC_END"]
# get mip1 orientation
ori = d["orientation"]
# get m2 coordinates
m = m2.mip_dic
next_ext_start = m["extension_primer_information"]["GENOMIC_START"]
next_ext_end = m["extension_primer_information"]["GENOMIC_END"]
next_lig_start = m["ligation_primer_information"]["GENOMIC_START"]
next_lig_end = m["ligation_primer_information"]["GENOMIC_END"]
# get mip2 orientation
next_ori = m["orientation"]
if ori == next_ori:
m1_start = min([ext_start, ext_end, lig_start, lig_end])
m1_end = max([ext_start, ext_end, lig_start, lig_end])
m2_start = min([next_ext_start, next_ext_end, next_lig_start,
next_lig_end])
m2_end = max([next_ext_start, next_ext_end, next_lig_start,
next_lig_end])
ol = overlap([m1_start, m1_end], [m2_start, m2_end])
if len(ol) == 0:
return True
else:
return (ol[1] - ol[0] + 1) <= overlap_same
else:
m1_set = set(list(range(min([ext_start, ext_end]),
max([ext_start, ext_end]) + 1))
+ list(range(min([lig_start, lig_end]),
max([lig_start, lig_end]) + 1)))
m2_set = set(list(range(min([next_ext_start, next_ext_end]),
max([next_ext_start, next_ext_end]) + 1))
+ list(range(min([next_lig_start, next_lig_end]),
max([next_lig_start, next_lig_end]) + 1)))
ol = len(m1_set.intersection(m2_set))
return ol <= overlap_opposite
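# Illustrative sketch (not used by the pipeline): the arithmetic behind the
# same-strand branch of compatible_mip_check above, with made-up coordinates.
def _same_strand_overlap_example(overlap_same=50):
    """Return True if two hypothetical same-strand MIP footprints overlap by
    at most `overlap_same` bases, mirroring the logic above."""
    # footprints are the [min, max] of extension + ligation arm coordinates
    m1_start, m1_end = 1000, 1150
    m2_start, m2_end = 1120, 1270
    ol_start = max(m1_start, m2_start)
    ol_end = min(m1_end, m2_end)
    if ol_end < ol_start:
        # no overlap at all is always compatible
        return True
    # 31 bases of overlap here, which is within the allowed 50
    return (ol_end - ol_start + 1) <= overlap_same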
def compatible_chains(primer_file, mip_dict, primer3_output_DIR,
primer_out, output_file, must_bonus, set_copy_bonus,
overlap_same, overlap_opposite, outp, bin_size,
trim_increment, trim_limit, set_size, chain_mips,
intervals):
try:
with open(os.path.join(
primer3_output_DIR, primer_file), "r") as infile:
scored_mips = json.load(infile)
except IOError:
print("Primer file does not exist.")
return 1
else:
# make a copy of the original mip dict to use in filtering
temp_dict = copy.deepcopy(mip_dict)
# create small subregions for binning MIPs and creating compatible
# mip sets for smaller regions
begin = intervals[0]
end = intervals[1]
bins = list(range(begin, end, bin_size))
# if a single nucleotide is the target, the interval will be the
# position of that nucleotide as [pos, pos] and the range will return
        # an empty list. In this case we'll create a [pos, pos] list instead.
if begin == end:
bins = [begin, end]
if bins[-1] != end:
bins.append(end)
num_bins = len(bins) - 1
# group MIPs into bins. Bins can share MIPs.
binned = {}
for i in range(num_bins):
binned[i] = {}
bin_start = bins[i]
bin_end = bins[i + 1]
for k in temp_dict:
cp = temp_dict[k].mip["C0"]
cs = cp["capture_start"]
ce = cp["capture_end"]
if len(overlap([cs, ce], [bin_start, bin_end])) > 0:
binned[i][k] = temp_dict[k]
# remove MIPs covering similar regions until we have only
# "set_size" number of MIPs per bin.
for i in binned:
trim_size = 1
while (trim_size <= trim_limit) and (len(binned[i]) > set_size):
filter_mips(binned[i], trim_size, set_size)
trim_size += trim_increment
# create (in)compatibility lists for each MIP
for k in list(scored_mips["pair_information"].keys()):
# get coordinates of mip arms
d = scored_mips["pair_information"][k]
# extension arm start position
es = d["extension_primer_information"]["GENOMIC_START"]
# extension arm end position
ee = d["extension_primer_information"]["GENOMIC_END"]
# ligation arm start position
ls = d["ligation_primer_information"]["GENOMIC_START"]
# ligation arm end position
le = d["ligation_primer_information"]["GENOMIC_END"]
# get mip orientation
ori = d["orientation"]
# create an in/compatibility list
incompatible = set()
compatible = set()
# loop through all mips to populate compatibility lists
for mk in list(scored_mips["pair_information"].keys()):
m = scored_mips["pair_information"][mk]
# next MIP's extension arm start position
nes = m["extension_primer_information"]["GENOMIC_START"]
# next MIP's extension arm end position
nee = m["extension_primer_information"]["GENOMIC_END"]
# next MIP's ligation arm start position
nls = m["ligation_primer_information"]["GENOMIC_START"]
# next MIP's ligation arm end position
nle = m["ligation_primer_information"]["GENOMIC_END"]
# get mip orientation
next_ori = m["orientation"]
compat = 0
next_compat = 0
# check if the two mips are compatible in terms of
# orientation and coordinates
if ori == next_ori == "forward":
if (((ls < nls) and (ls < nes + overlap_same))
or ((ls > nls) and (es + overlap_same > nls))):
compat = 1
elif ori == next_ori == "reverse":
if (((ls < nls) and (es < nls + overlap_same))
or ((ls > nls) and (ls + overlap_same > nes))):
compat = 1
elif (ori == "forward") and (next_ori == "reverse"):
if ((ls < nls + overlap_opposite)
or (es + overlap_opposite > nes)):
compat = 1
elif ((es < nls) and (ee < nls + overlap_opposite)
and (le + overlap_opposite > nle)
and (ls < nee + overlap_opposite)):
compat = 1
next_compat = 1
elif ((es > nls) and (es + overlap_opposite > nle)
and (ee < nee + overlap_opposite)
and (le + overlap_opposite > nes)):
compat = 1
elif (ori == "reverse") and (next_ori == "forward"):
if ((ls + overlap_opposite > nls)
or (es < nes + overlap_opposite)):
compat = 1
elif ((ls > nes) and (ls + overlap_opposite > nee)
and (le < nle + overlap_opposite)
and (ee + overlap_opposite > nls)):
compat = 1
elif ((ls < nes) and (le < nes + overlap_opposite)
and (ee + overlap_opposite > nee)
and (es < nle + overlap_opposite)):
compat = 1
next_compat = 1
if not compat:
incompatible.add(mk)
if next_compat:
compatible.add(mk)
d["incompatible"] = incompatible
d["compatible"] = compatible
def compatible_recurse(l):
"""
            Take a list, l, of keys that represent a compatible mip set.
            Find the mips in the current bin (subset) that are compatible
            with the last mip in the list, using the "compatible" and
            "incompatible" sets stored for each mip. For each such mip,
            recurse with the extended list until the chain cannot be
            elongated further, then append the completed chain to the
            mip_sets list.
"""
# create a set of mips that are incompatible with any mip in
# the starting list.
incomp = set(l)
for il in l:
incomp.update(scored_mips["pair_information"][il][
"incompatible"])
            # create a set of mips that could be added next to the mip list
comp = scored_mips["pair_information"][l[-1]][
"compatible"].difference(incomp).intersection(subset)
# if there are mips that can be added, call compatible_recurse
# function for each of those mips
if len(comp) > 0:
for n in comp:
compatible_recurse(l + [n])
# stop recursing when the mip chain cannot be elongated
else:
mip_sets.append((l))
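        # Toy illustration with made-up keys: if "A" is compatible with "B"
        # and "C", and "B" is compatible with "C", then starting from
        # compatible_recurse(["A", "B"]) the chain first extends to
        # ["A", "B", "C"]; once no compatible mip remains, the finished
        # chain is appended to mip_sets.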
keys = sorted(scored_mips["pair_information"],
key=lambda a: scored_mips["pair_information"][a]
["pairs"]["C0"]["capture_start"])
ms_dict = {}
for i in binned:
subset = binned[i]
mip_sets = []
for k in keys:
if k in subset:
comp_list = scored_mips["pair_information"][k][
"compatible"].intersection(subset)
if len(comp_list) > 0:
# for each of the mips in the compatibility list,
for m in comp_list:
# check if these two mips are present in other sets
# if they are, then no need to pursue this branch
# anymore as the same branch will be in the other
# mip set as well
test_set = frozenset([k, m])
for p_set in mip_sets:
if test_set.issubset(set(p_set)):
break
else:
# create an initial result list to be used by
# the compatible_recurse function
compatible_recurse([k, m])
else:
mip_sets.append(([k]))
ms_dict[i] = mip_sets
        # define a function for getting the mip set score and coverage
def score_mipset(mip_set):
# create a dic for diffs captured cumulatively by all
# mips in the set
merged_caps = []
# create a list for mip scores based on mip sequence and
# not the captured diffs
mip_scores = []
# create a list for what is captured by the set (only must
# captures)
must_captured = []
# create a list for other targets captured
targets_captured = []
# a list for mip coordinates
capture_coordinates = []
for mip_key in mip_set:
# extract the mip name
# extract the captured diffs from the mip_dic and
# append to capture list
mip_obj = mip_dict[mip_key]
uniq = mip_obj.capture_info["unique_captures"]
merged_caps.extend(uniq)
must_captured.extend(mip_obj.captures)
targets_captured.extend(mip_obj.captured_targets)
if ((mip_obj.tech_score > 0)
and (mip_obj.func_score > 0)):
mip_scores.append(
float(mip_obj.tech_score * mip_obj.func_score)
/ 1000
)
else:
mip_scores.append(
float(mip_obj.tech_score + mip_obj.func_score)
/ 1000)
mcoord = sorted(
[mip_obj.extension["C0"]["GENOMIC_START"],
mip_obj.ligation["C0"]["GENOMIC_START"],
mip_obj.extension["C0"]["GENOMIC_END"],
mip_obj.ligation["C0"]["GENOMIC_END"]]
)
capture_coordinates.append([mcoord[1] + 1,
mcoord[2] - 1])
merged_capture_coordinates = merge_overlap(
capture_coordinates, 50)
scp = len(set(merged_caps)) * set_copy_bonus
must_set = list(set(must_captured))
mb = len(must_set) * must_bonus
total_score = mb + scp + sum(mip_scores)
return total_score, merged_capture_coordinates
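        # Worked example with made-up numbers: a two-MIP set with
        # tech_score * func_score products of 2000 and 3000 contributes
        # 2.0 + 3.0 after division by 1000; if it captures 3 unique paralog
        # copies and 2 must-capture targets, and set_copy_bonus=100 and
        # must_bonus=1000 were passed in, the total score would be
        # 2000 + 300 + 5.0 = 2305.0.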
# create a dictionary to hold mip sets and their scores
mip_set_dict = {}
for i in ms_dict:
mip_set_dict[i] = {}
bin_co = bins[i: i + 2]
bin_size = bin_co[1] - bin_co[0] + 1
for j in range(len(ms_dict[i])):
ms = ms_dict[i][j]
sc = score_mipset(ms)
coverage = overlap(sc[1][0], bin_co)
coverage = (coverage[1] - coverage[0] + 1) / bin_size
mip_set_dict[i][j] = {"mip_list": ms, "score": sc[0],
"coordinates": sc[1][0],
"coverage": coverage}
for i in mip_set_dict:
iter_keys = list(mip_set_dict[i].keys())
for j in iter_keys:
try:
s1 = mip_set_dict[i][j]["mip_list"]
sc1 = mip_set_dict[i][j]["score"]
crd1 = mip_set_dict[i][j]["coordinates"]
cov1 = mip_set_dict[i][j]["coverage"]
for k in iter_keys:
if k == j:
continue
try:
s2 = mip_set_dict[i][k]["mip_list"]
sc2 = mip_set_dict[i][k]["score"]
crd2 = mip_set_dict[i][k]["coordinates"]
cov2 = mip_set_dict[i][k]["coverage"]
if check_redundant_region(crd1, crd2, spacer=0):
# if one set is to be removed pick the one
# with full coverage of the target region
# in case there is one
if chain_mips:
if (cov1 == 1) and (cov2 < 1):
mip_set_dict[i].pop(k)
elif (cov2 == 1) and (cov1 < 1):
mip_set_dict[i].pop(j)
break
# if both are covering the target
# or if both are failing to cover
# then pick the set with better score
elif sc2 > sc1:
mip_set_dict[i].pop(j)
break
else:
mip_set_dict[i].pop(k)
# if chaining mip is not required
# pick the better scoring set
elif sc2 > sc1:
mip_set_dict[i].pop(j)
break
else:
mip_set_dict[i].pop(k)
except KeyError:
continue
except KeyError:
continue
# merge compatible chains within each bin (to some extent)
merged_sets = {}
for i in mip_set_dict:
mip_sets = set()
for j in mip_set_dict[i]:
mip_sets.add(frozenset(mip_set_dict[i][j]["mip_list"]))
# these mip sets only contain mip chains. We can expand each
# such set by merging with other sets after removing incompatible
# mips from the second set.
counter = 0
for counter in range(5):
new_mip_sets = set()
for s1 in mip_sets:
inc = set()
for m in s1:
inc.update(scored_mips["pair_information"][m][
"incompatible"])
new_set = set(s1)
for s2 in mip_sets:
counter += 1
s3 = s2.difference(inc).difference(new_set)
if len(s3) > 0:
new_set.update(s3)
for m in new_set:
inc.update(scored_mips["pair_information"][m][
"incompatible"])
new_mip_sets.add(frozenset(new_set))
mip_sets = new_mip_sets
if len(mip_sets) > 0:
merged_sets[i] = mip_sets
# combine mip sets in different bins
# first, calculate how many combinations there will be
combo_length = 1
for i in merged_sets:
combo_length *= len(merged_sets[i])
# if too many combinations, reduce by picking the top 5 scoring
# sets for each bin
if combo_length > pow(10, 7):
for i in list(merged_sets.keys()):
top_sets = set(sorted(merged_sets[i],
key=lambda a: score_mipset(a)[0],
reverse=True)[:5])
merged_sets[i] = top_sets
combo_length = 1
for i in merged_sets:
combo_length *= len(merged_sets[i])
# if still too many combinations, take the top set for each bin
if combo_length > pow(10, 7):
for i in list(merged_sets.keys()):
top_sets = set(sorted(merged_sets[i],
key=lambda a: score_mipset(a)[0],
reverse=True)[:1])
merged_sets[i] = top_sets
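        # Numeric example of this reduction: 12 bins with 10 merged sets each
        # give 10**12 combinations; keeping the 5 best sets per bin leaves
        # 5**12 (~2.4e8), which is still above 10**7, so the final fallback
        # keeps only the top set per bin, leaving a single combination.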
# combine mip sets in different bins
combined_sets = set()
combo_list = list(itertools.product(
*[merged_sets[i] for i in sorted(merged_sets)]))
for l in combo_list:
if len(l) == 1:
m_set = set(l[0])
else:
m_set = set()
for i in range(len(l) - 1):
s1 = l[i]
s2 = l[i + 1]
inc = set()
for m in s1:
inc.update(scored_mips["pair_information"][m][
"incompatible"])
s3 = s2.difference(inc)
m_set.update(s1.union(s3))
combined_sets.add(frozenset(m_set))
if outp:
with open(os.path.join(
primer3_output_DIR, output_file), "w") as outfile:
outfile.write("\n".join([",".join(s) for s in combined_sets])
+ "\n")
with open(os.path.join(
primer3_output_DIR, primer_out), "wb") as outfile:
pickle.dump(scored_mips, outfile)
return combined_sets
def design_mips(design_dir, g):
print(("Designing MIPs for ", g))
try:
Par = mod.Paralog(os.path.join(design_dir, g, "resources",
g + ".rinfo"))
Par.run_paralog()
if Par.copies_captured:
print(("All copies were captured for paralog ", Par.paralog_name))
else:
print(("Some copies were NOT captured for paralog ",
Par.paralog_name))
if Par.chain_mips:
if Par.chained_mips:
print(("All MIPs are chained for paralog ", Par.paralog_name))
else:
print(("MIPs are NOT chained for paralog ", Par.paralog_name))
except Exception as e:
print((g, str(e), " FAILED!!!"))
return
def design_mips_worker(design_list):
design_dir, g = design_list
print(("Designing MIPs for ", g))
try:
rinfo_file = os.path.join(design_dir, g, "resources", g + ".rinfo")
Par = mod.Paralog(rinfo_file)
Par.run_paralog()
if len(Par.mips) == 0:
return
if Par.copies_captured:
print(("All copies were captured for paralog ", Par.paralog_name))
else:
print(("Some copies were NOT captured for paralog ",
Par.paralog_name))
if Par.chain_mips:
if Par.chained_mips:
print(("All MIPs are chained for paralog ", Par.paralog_name))
else:
print(("MIPs are NOT chained for paralog ", Par.paralog_name))
except Exception as e:
print((g, str(e), " FAILED!!!"))
traceback.print_exc()
return 0
def design_mips_multi(design_dir, g_list, num_processor):
chore_list = [[design_dir, g] for g in g_list]
res = []
try:
p = NoDaemonProcessPool(num_processor)
p.map_async(design_mips_worker, chore_list, callback=res.append)
p.close()
p.join()
except Exception as e:
res.append(str(e))
return res
def parasight(resource_dir,
design_info_file,
designed_gene_list=None,
extra_extension=".extra",
use_json=False):
if not use_json:
with open(design_info_file, "rb") as infile:
design_info = pickle.load(infile)
else:
with open(design_info_file) as infile:
design_info = json.load(infile)
output_list = ["#!/usr/bin/env bash"]
pdf_dir = os.path.join(resource_dir, "pdfs")
backup_list = ["#!/usr/bin/env bash"]
gs_list = ["#!/usr/bin/env bash"]
pdf_list = ["#!/usr/bin/env bash"]
pdf_merge_list = ["#!/usr/bin/env bash", "cd " + pdf_dir]
pdf_convert_list = ["gs -dBATCH -dNOPAUSE -q -sDEVICE=pdfwrite "
+ "-dPDFSETTINGS=/prepress -dAutoRotatePages=/All "
"-sOutputFile=merged.pdf"]
if not os.path.exists(pdf_dir):
os.makedirs(pdf_dir)
for t in design_info:
basename = os.path.join(design_info[t]["design_dir"], t, t)
backup_name = basename + ".extra"
filtered_name = basename + "_filtered.pse"
backup_list.append("scp " + backup_name + " " + backup_name + ".bak")
backup_list.append("mv " + filtered_name + " " + backup_name)
psname = basename + ".01.01.ps"
pdfname = basename + ".pdf"
gs_command = ("gs -dBATCH -dNOPAUSE -q -sDEVICE=pdfwrite "
+ "-dPDFSETTINGS=/prepress -dAutoRotatePages=/All "
"-sOutputFile=" + pdfname + " " + psname)
if designed_gene_list is not None:
if t in designed_gene_list:
pdf_convert_list.append(t + ".pdf")
else:
pdf_convert_list.append(t + ".pdf")
gs_list.append(gs_command)
pdf_list.append("cp " + basename + ".pdf "
+ os.path.join(pdf_dir, t + ".pdf"))
outlist = ["parasight76.pl",
"-showseq", basename + ".show",
"-extra", basename + extra_extension,
"-template", "/opt/resources/nolabel.pst",
"-precode file:" + basename + ".precode",
"-die"]
output_list.append(" ".join(outlist))
with open(basename + ".precode", "w") as outfile:
outfile.write("$opt{'filename'}='" + t
+ "';&fitlongestline; &print_all (0,'"
+ basename + "')")
with open(os.path.join(resource_dir, "backup_commands"), "w") as outfile:
outfile.write("\n".join(backup_list))
with open(
os.path.join(resource_dir, "parasight_commands"), "w") as outfile:
outfile.write("\n".join(output_list))
with open(os.path.join(resource_dir, "gs_commands"), "w") as outfile:
outfile.write("\n".join(gs_list))
with open(os.path.join(resource_dir, "copy_commands"), "w") as outfile:
outfile.write("\n".join(pdf_list))
pdf_merge_list.append(" ".join(pdf_convert_list))
with open(os.path.join(resource_dir, "convert_commands"), "w") as outfile:
outfile.write("\n".join(pdf_merge_list))
visualization_list = ["#!/usr/bin/env bash"]
visualization_list.append("chmod +x backup_commands")
visualization_list.append("./backup_commands")
visualization_list.append("chmod +x parasight_commands")
visualization_list.append("./parasight_commands")
visualization_list.append("chmod +x gs_commands")
visualization_list.append("./gs_commands")
visualization_list.append("chmod +x copy_commands")
visualization_list.append("./copy_commands")
visualization_list.append("chmod +x convert_commands")
visualization_list.append("./convert_commands")
with open(os.path.join(resource_dir, "visualize.sh"), "w") as outfile:
outfile.write("\n".join(visualization_list))
return
def parasight_print(resource_dir, design_dir, design_info_file,
designed_gene_list=None, extra_extension=".extra",
use_json=False, print_out=False):
if not use_json:
with open(design_info_file, "rb") as infile:
design_info = pickle.load(infile)
else:
with open(design_info_file) as infile:
design_info = json.load(infile)
output_file = os.path.join(resource_dir, "parasight_print.txt")
with open(output_file, "w") as outfile:
for g in design_info:
if (designed_gene_list is None) or (g in designed_gene_list):
show_file = os.path.join(design_dir, g, g + ".show")
extras_file = os.path.join(design_dir, g, g + extra_extension)
line = ["parasight76.pl", "-showseq", show_file,
"-extra ", extras_file]
if print_out:
print(" ".join(line))
outfile.write(" ".join(line) + "\n")
###############################################################
# Data analysis related functions
###############################################################
def get_analysis_settings(settings_file):
"""Convert analysis settings file to dictionary."""
settings = {}
with open(settings_file) as infile:
for line in infile:
try:
if not line.startswith("#"):
newline = line.strip().split("\t")
value = newline[1].split(",")
if len(value) == 1:
settings[newline[0]] = value[0]
else:
settings[newline[0]] = [v for v in value if v != ""]
except Exception as e:
print(("Formatting error in settings file, line {}"
"causing error '{}''").format(line, e))
print(newline)
return
return settings
def write_analysis_settings(settings, settings_file):
"""Create a settings file from a settings dictionary."""
outfile_list = [["# Setting Name", "Setting Value"]]
for k, v in settings.items():
if isinstance(v, list):
val = ",".join(map(str, v))
else:
val = str(v)
outfile_list.append([k, val])
with open(settings_file, "w") as outfile:
outfile.write("\n".join(["\t".join(o) for o in outfile_list]) + "\n")
return
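# The sketch below is illustrative only and is not called by the pipeline; the
# file name and values are hypothetical. It shows the tab separated settings
# format produced and consumed by the two functions above: single values come
# back as strings and comma separated values come back as lists.
def _example_analysis_settings_roundtrip():
    """Write a tiny settings dict and read it back (illustrative sketch)."""
    import tempfile
    example = {"workingDir": "/opt/analysis/", "mipSetKey": ["mymips", "all"]}
    with tempfile.NamedTemporaryFile("w", suffix=".tsv", delete=False) as tmp:
        settings_path = tmp.name
    write_analysis_settings(example, settings_path)
    # returns {"workingDir": "/opt/analysis/", "mipSetKey": ["mymips", "all"]}
    return get_analysis_settings(settings_path)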
###############################################################################
# New contig based analysis for vcf generation
###############################################################################
def map_haplotypes(settings):
"""Bwa-map haplotypes from MIPWrangler output to the reference genome.
Extract each unique haplotype sequence from the MIPWrangler output and
map to reference genome. MIPWrangler maps the sequencing data to the MIPs
used for an experiment based on the probe arms. We compare here whether
the best genomic loci for a given haplotype matches to the MIPWrangler
    assignment. If not, we consider those haplotypes off target and remove
    them.
"""
wdir = settings["workingDir"]
haplotypes_fq_file = os.path.join(wdir, settings["haplotypesFastqFile"])
haplotypes_sam_file = os.path.join(wdir, settings["haplotypesSamFile"])
bwa_options = settings["bwaOptions"]
call_info_file = settings["callInfoDictionary"]
species = settings["species"]
try:
tol = int(settings["alignmentTolerance"])
except KeyError:
tol = 200
# DATA EXTRACTION ###
raw_results = pd.read_table(os.path.join(wdir,
settings["mipsterFile"]))
##########################################################
# Add the statistics for each haplotype to the data
# such as how many samples had a given haplotype
# and how many barcodes supported a given haplotype
# Filter the haplotypes for those criteria to
# remove possible noise and infrequent haplotypes
##########################################################
# Haplotype Filters from the settings file
haplotype_min_barcode_filter = int(settings["minHaplotypeBarcodes"])
haplotype_min_sample_filter = int(settings["minHaplotypeSamples"])
haplotype_min_sample_fraction_filter = float(
settings["minHaplotypeSampleFraction"]
)
# Gather per haplotype data across samples
hap_counts = raw_results.groupby(
"haplotype_ID"
)["barcode_count"].sum().reset_index().rename(
columns={"barcode_count": "Haplotype Barcodes"})
hap_sample_counts = raw_results.groupby("haplotype_ID")[
"sample_name"].apply(lambda a: len(set(a))).reset_index().rename(
columns={"sample_name": "Haplotype Samples"})
num_samples = float(raw_results["sample_name"].unique().size)
hap_sample_counts["Haplotype Sample Fraction"] = (
hap_sample_counts["Haplotype Samples"] / num_samples
)
hap_counts = hap_counts.merge(hap_sample_counts)
initial_hap_count = len(hap_counts)
hap_counts = hap_counts.loc[(hap_counts["Haplotype Samples"]
>= haplotype_min_sample_filter)
& (hap_counts["Haplotype Sample Fraction"]
>= haplotype_min_sample_fraction_filter)
& (hap_counts["Haplotype Barcodes"]
>= haplotype_min_barcode_filter)]
print(("Out of {} initial haplotypes, {} were filtered using {}, {}, and "
"{} as minimum total UMI count; number and fraction of samples "
" the haplotype was observed in, respectively.").format(
initial_hap_count, initial_hap_count - len(hap_counts),
haplotype_min_barcode_filter, haplotype_min_sample_filter,
haplotype_min_sample_fraction_filter))
hap_df = raw_results.loc[raw_results["haplotype_ID"].isin(
hap_counts["haplotype_ID"])].groupby(
["gene_name", "mip_name", "haplotype_ID"])[
"haplotype_sequence"].first().reset_index()
# fill in fake sequence quality scores for each haplotype. These scores
# will be used for mapping only and the real scores for each haplotype
    # for each sample will be added later. This step is probably unnecessary
# as the bwa mem algorithm does not seem to use the quality scores.
hap_df["quality"] = hap_df["haplotype_sequence"].apply(
lambda a: "H" * len(a))
haps = hap_df.set_index("haplotype_ID").to_dict(orient="index")
# BWA alignment
# create a fastq file for bwa input
with open(haplotypes_fq_file, "w") as outfile:
for h in haps:
outfile.write("@" + h + "\n")
outfile.write(haps[h]["haplotype_sequence"] + "\n" + "+" + "\n")
outfile.write(haps[h]["quality"] + "\n")
# run bwa
bwa(haplotypes_fq_file, haplotypes_sam_file, "sam", "", "", bwa_options,
species)
# process alignment output sam file
header = ["haplotype_ID", "FLAG", "CHROM", "POS", "MAPQ", "CIGAR", "RNEXT",
"PNEXT", "TLEN", "SEQ", "QUAL"]
sam_list = []
with open(haplotypes_sam_file) as infile:
for line in infile:
if not line.startswith("@"):
newline = line.strip().split()
samline = newline[:11]
for item in newline[11:]:
value = item.split(":")
if value[0] == "AS":
samline.append(int(value[-1]))
break
else:
samline.append(-5000)
sam_list.append(samline)
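    # Example: an optional SAM field such as "AS:i:98" splits into
    # ["AS", "i", "98"], so int(value[-1]) stores the alignment score 98;
    # alignments without an AS tag get the sentinel score -5000.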
sam = pd.DataFrame(sam_list, columns=header + ["alignment_score"])
# find alignment with the highest alignment score. We will consider these
# the primary alignments and the source of the sequence.
sam["best_alignment"] = (sam["alignment_score"] == sam.groupby(
"haplotype_ID")["alignment_score"].transform("max"))
# add MIP column to alignment results
sam["MIP"] = sam["haplotype_ID"].apply(lambda a: a.split(".")[0])
# create call_info data frame for all used probes in the experiment
probe_sets_file = settings["mipSetsDictionary"]
probe_set_keys = settings["mipSetKey"]
used_probes = set()
for psk in probe_set_keys:
with open(probe_sets_file) as infile:
used_probes.update(json.load(infile)[psk])
with open(call_info_file) as infile:
call_info = json.load(infile)
call_df_list = []
for g in call_info:
for m in call_info[g]:
if m in used_probes:
mip_number = int(m.split("_")[-1][3:])
sub_number = int(m.split("_")[-2][3:])
for c in call_info[g][m]["copies"]:
call_dict = call_info[g][m]["copies"][c]
try:
call_dict.pop("genes")
except KeyError:
pass
try:
call_dict.pop("variants")
except KeyError:
pass
call_dict["gene"] = g
call_dict["MIP"] = m
call_dict["copy"] = c
call_dict["mip_number"] = mip_number
call_dict["sub_number"] = sub_number
call_df_list.append(pd.DataFrame(call_dict, index=[0]))
call_df = pd.concat(call_df_list, ignore_index=True, sort=True)
# combine alignment information with design information (call_info)
haplotype_maps = call_df.merge(
sam[["MIP", "haplotype_ID", "CHROM", "POS", "best_alignment",
"alignment_score"]])
haplotype_maps["POS"] = haplotype_maps["POS"].astype(int)
haplotype_maps = haplotype_maps.merge(
hap_df[["haplotype_ID", "haplotype_sequence"]])
# determine which haplotype/mapping combinations are for intended targets
# first, compare mapping coordinate to the MIP coordinate to see if
# a MIP copy matches with the alignment.
haplotype_maps["aligned_copy"] = (
(haplotype_maps["CHROM"] == haplotype_maps["chrom"])
& (abs(haplotype_maps["POS"] - haplotype_maps["capture_start"]) <= tol)
)
    # aligned_copy means the alignment is on the intended MIP target.
    # This is not necessarily the best target, though. For a haplotype sequence
# to be matched to a MIP target, it also needs to be the best alignment.
haplotype_maps["mapped_copy"] = (haplotype_maps["aligned_copy"]
& haplotype_maps["best_alignment"])
# rename some fields to be compatible with previous code
haplotype_maps.rename(columns={"gene": "Gene", "copy": "Copy",
"chrom": "Chrom"}, inplace=True)
    # any haplotype that was not best mapped to at least one target
# will be considered an off target haplotype.
haplotype_maps["off_target"] = ~haplotype_maps.groupby(
"haplotype_ID")["mapped_copy"].transform("any")
off_target_haplotypes = haplotype_maps.loc[haplotype_maps["off_target"]]
# filter off targets and targets that do not align to haplotypes
haplotypes = haplotype_maps.loc[(~haplotype_maps["off_target"])
& haplotype_maps["aligned_copy"]]
# each MIP copy/haplotype_ID combination must have a single alignment
# if there are multiple, the best one will be chosen
def get_best_alignment(group):
return group.sort_values("alignment_score", ascending=False).iloc[0]
haplotypes = haplotypes.groupby(["MIP", "Copy", "haplotype_ID"],
as_index=False).apply(get_best_alignment)
haplotypes.index = (range(len(haplotypes)))
# filter to best mapping copy/haplotype pairs
mapped_haplotypes = haplotypes.loc[haplotypes["mapped_copy"]]
mapped_haplotypes["mapped_copy_number"] = mapped_haplotypes.groupby(
["haplotype_ID"])["haplotype_ID"].transform(len)
mapped_haplotypes.to_csv(os.path.join(
wdir, "mapped_haplotypes.csv"), index=False)
off_target_haplotypes.to_csv(os.path.join(
wdir, "offtarget_haplotypes.csv"), index=False)
haplotypes.to_csv(os.path.join(
wdir, "aligned_haplotypes.csv"), index=False)
haplotype_maps.to_csv(os.path.join(
wdir, "all_haplotypes.csv"), index=False)
num_hap = len(set(haplotype_maps["haplotype_ID"]))
num_off = len(set(off_target_haplotypes["haplotype_ID"]))
print(("{} of {} haplotypes were off-target, either not mapping to "
"the reference genome, or best mapping to a region which was "
"not targeted.").format(num_off, num_hap))
return
def get_vcf_haplotypes(settings):
"""
    Backwards-compatible alias for the map_haplotypes function.

    This is the old name for the map_haplotypes function. Some notebooks
    might still use the old name, so this simply runs map_haplotypes when
    called by the old name.
"""
map_haplotypes(settings)
def get_haplotype_counts(settings):
"""Get UMI and read counts for each on target haplotype for each sample.
MIPWrangler output has the UMI and read counts per haplotype but some of
those are off target and some are mapping to multiple loci by design.
The decision on whether a haplotype sequence is on or off target and where
it maps best or if it maps to multiple loci are made by the map_haplotypes
function. This function distributes the UMI and read counts in the
MIPWrangler output using the mapped haplotypes data for each sample.
If a haplotype sequence is uniquely mapping to a targeted locus, we
allocate all reads for that sample and haplotype sequence to that locus.
If it is mapping to multiple places, we determine the ratios of those
'paralogous copies' for that sample based on the average mapping around
each locus and allocate the reads for that sample and that haplotype
sequence proportionally to the mapped loci. If a haplotype sequence is
mapping best to an unintended locus, we remove those.
"""
wdir = settings["workingDir"]
##########################################################
##########################################################
# Process 1: use sample sheet to determine which data points from the
# mipster file should be used, print relevant statistics.
##########################################################
##########################################################
# process sample sheets
run_meta = pd.read_table(os.path.join(wdir, "samples.tsv"))
# create a unique sample ID for each sample using sample name,
# sample set and replicate fields from the sample list file.
run_meta["sample_name"] = (
run_meta["sample_name"].astype(str)
)
run_meta["Sample Name"] = run_meta["sample_name"]
run_meta["Sample ID"] = run_meta[
["sample_name", "sample_set", "replicate"]
].apply(lambda a: "-".join(map(str, a)), axis=1)
# Sample Set key is reserved for meta data
# but sometimes erroneously included in the
# sample sheet. It should be removed.
try:
run_meta.drop("Sample Set", inplace=True, axis=1)
except (ValueError, KeyError):
pass
# a change to the formatting of sample sheets uses library_prep
# instead of Library Prep, so the below line is for backwards compatibility
run_meta.rename(columns={"library_prep": "Library Prep"}, inplace=True)
# drop duplicate values originating from
# multiple sequencing runs of the same libraries
run_meta = run_meta.drop_duplicates()
run_meta = run_meta.groupby(
["Sample ID", "Library Prep"]
).first().reset_index()
run_meta.to_csv(os.path.join(wdir, "run_meta.csv"))
# get used sample ids
sample_ids = run_meta["Sample ID"].unique().tolist()
##########################################################
##########################################################
# Process 2: extract all observed variants from observed
# haplotypes and create a variation data frame that will
# be able to map haplotype IDs to variation.
##########################################################
##########################################################
# get the haplotype dataframe for all mapped haplotypes
mapped_haplotype_df = pd.read_csv(
os.path.join(wdir, "mapped_haplotypes.csv"))
##########################################################
##########################################################
# Process 3: load the MIPWrangler output which has
# per sample per haplotype information, such as
# haplotype sequence quality, barcode counts etc.
# Create a suitable dataframe that can be merged
# with variant data to get the same information for each
# variant (variant barcode count, variant quality, etc.)
##########################################################
##########################################################
# get the MIPWrangler Output
raw_results = pd.read_table(os.path.join(wdir, settings["mipsterFile"]))
# limit the results to the samples intended for this analysis
raw_results = raw_results.loc[
raw_results["sample_name"].isin(sample_ids)
]
# rename some columns for better visualization in tables
raw_results.rename(
columns={"sample_name": "Sample ID",
"mip_name": "MIP",
"gene_name": "Gene",
"barcode_count": "Barcode Count",
"read_count": "Read Count"},
inplace=True
)
# use only the data corresponding to mapped haplotypes
# filtering the off target haplotypes.
mapped_results = raw_results.merge(mapped_haplotype_df, how="inner")
# Try to estimate the distribution of data that is mapping
# to multiple places in the genome.
# This is done in 4 steps.
# 1) Get uniquely mapping haplotypes and barcode counts
unique_df = mapped_results.loc[mapped_results["mapped_copy_number"] == 1]
unique_table = pd.pivot_table(unique_df,
index="Sample ID",
columns=["Gene", "MIP", "Copy", "Chrom"],
values=["Barcode Count"],
aggfunc=np.sum)
# 2) Estimate the copy number of each paralog gene
# for each sample from the uniquely mapping data
# Two values from the settings are used to determine the copy number
# in a given gene. Average copy count is the ploidy of the organism
# and the normalization percentile is what percentile is used for
    # normalizing the data. For example, for human genes the average copy
    # count (ACC) is 2; if the percentiles are given as 0.4 and 0.6, we take
    # the 40th and 60th percentiles of the barcode counts for each probe
    # across the samples and assume that the average of these two values
    # represents the average copy count of 2. This normalization value is
    # then calculated for each probe and applied to each sample.
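    # Worked example with made-up counts: if a probe's 40th and 60th
    # percentile UMI counts across samples are 80 and 120, their average
    # (100) is taken to correspond to 2 copies, so a sample with 150 UMIs
    # for that probe would be assigned a copy count of about 3.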
try:
average_copy_count = float(settings["averageCopyCount"])
norm_percentiles = list(map(float,
settings["normalizationPercentiles"]))
except KeyError:
average_copy_count = 2
norm_percentiles = [0.4, 0.6]
unique_df.loc[:, "Copy Average"] = average_copy_count
# Adjusted barcode count will represent the estimated barcode count
# for multimapping haplotypes. For example, if hap1 is mapping to 2
# places in the genome and its barcode count for a sample containing this
# haplotype is 100. If we determined the copy numbers of the two mapping
# regions to be 1 and 1, the adjusted barcode count for each region
# would be 50. We'll set this value for uniquely mapping haplotypes
# to the Barcode Count, as they are not multi mapping.
unique_df.loc[:, "Adjusted Barcode Count"] = unique_df["Barcode Count"]
unique_df.loc[:, "Adjusted Read Count"] = unique_df["Read Count"]
unique_table.fillna(0, inplace=True)
# calculate the copy counts using the get_copy_counts function.
# this function normalizes data for each probe across samples
# and estimates copy counts using the percentile values as mentioned.
copy_counts = get_copy_counts(unique_table,
average_copy_count,
norm_percentiles)
# 3) Estimate the copy number of each "Gene"
# from the average copy count of uniquely mapping
# data for all MIPs within the gene.
cc = copy_counts.groupby(level=["Gene", "Copy"], axis=1).sum()
gc = copy_counts.groupby(level=["Gene"], axis=1).sum()
ac = cc.div(gc, level="Gene")
# 4) Distribute multi mapping data proportional to
# Paralog's copy number determined from the
# uniquely mapping data
multi_df = mapped_results.loc[mapped_results["mapped_copy_number"] > 1]
if not multi_df.empty:
# get the average copy count for the gene the haplotype belongs to
mca = multi_df.apply(lambda r: get_copy_average(r, ac), axis=1)
multi_df.loc[mca.index, "Copy Average"] = mca
multi_df["copy_sum"] = multi_df.groupby(
["Sample ID", "haplotype_ID"])["Copy Average"].transform("sum")
multi_df["copy_len"] = multi_df.groupby(
["Sample ID", "haplotype_ID"])["Copy Average"].transform("size")
null_index = multi_df["copy_sum"] == 0
multi_df.loc[null_index, "Copy Average"] = (
average_copy_count / multi_df.loc[null_index, "copy_len"])
multi_df.loc[null_index, "copy_sum"] = average_copy_count
multi_df["Copy Average"].fillna(0, inplace=True)
multi_df["Adjusted Barcode Count"] = (multi_df["Barcode Count"]
* multi_df["Copy Average"]
/ multi_df["copy_sum"])
multi_df["Adjusted Read Count"] = (multi_df["Read Count"]
* multi_df["Copy Average"]
/ multi_df["copy_sum"])
# Combine unique and multimapping data
combined_df = pd.concat([unique_df, multi_df], ignore_index=True,
sort=True)
combined_df.rename(
columns={
"Barcode Count": "Raw Barcode Count",
"Adjusted Barcode Count": "Barcode Count",
"Read Count": "Raw Read Count",
"Adjusted Read Count": "Read Count"
},
inplace=True
)
# print total read and barcode counts
print(
(
"Total number of reads and barcodes were {0[0]} and {0[1]}."
" On target number of reads and barcodes were {1[0]} and {1[1]}."
).format(
raw_results[["Read Count", "Barcode Count"]].sum(),
combined_df[["Read Count", "Barcode Count"]].sum().astype(int)
)
)
combined_df.to_csv(os.path.join(wdir, "haplotype_counts.csv"), index=False)
    # So far the count data only includes MIPs that have at least one read
# in at least one sample. We would like to include MIPs with no reads
# as well. So we'll create a dataframe that has all the intended MIPs
# and merge with the count data.
# create call_info data frame for all used probes in the experiment
call_info_file = settings["callInfoDictionary"]
probe_sets_file = settings["mipSetsDictionary"]
probe_set_keys = settings["mipSetKey"]
used_probes = set()
for psk in probe_set_keys:
with open(probe_sets_file) as infile:
used_probes.update(json.load(infile)[psk])
with open(call_info_file) as infile:
call_info = json.load(infile)
call_df_list = []
for g in call_info:
for m in call_info[g]:
if m in used_probes:
for c in call_info[g][m]["copies"]:
call_dict = {"MIP": m, "Copy": c}
call_df_list.append(pd.DataFrame(call_dict, index=[0]))
call_df = pd.concat(call_df_list, ignore_index=True, sort=True)
# merge the count data with probe data. Fill missing values with 0.
combined_df = call_df.merge(combined_df, how="left").fillna(0)
# Create pivot table of combined barcode counts
# This is a per MIP per sample barcode count table
# of the samples with sequencing data
barcode_counts = pd.pivot_table(combined_df,
index="Sample ID",
columns=["MIP",
"Copy"],
values=["Barcode Count"],
aggfunc=np.sum)
    # Probes without data have no sample name, so after the fillna(0) above
    # their "Sample ID" becomes 0. Remove that row if it exists.
try:
barcode_counts.drop(0, inplace=True)
except KeyError:
pass
print("There are {} samples with sequence data".format(
barcode_counts.shape[0]
))
    # After the pivot table is created, the column index has an extra
    # level named "Barcode Count". Remove that level from the column names.
bc_cols = barcode_counts.columns
bc_cols = [bc[1:] for bc in bc_cols]
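    # e.g. a pivot column such as ("Barcode Count", "mip_name", "C0")
    # (hypothetical names) becomes ("mip_name", "C0") after dropping the
    # leading level.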
# barcode count data is only available for samples with data
# so if a sample has not produced any data, it will be missing
# these samples should be added with 0 values for each probe
all_barcode_counts = pd.merge(
run_meta[["Sample ID", "replicate"]].set_index("Sample ID"),
barcode_counts, left_index=True, right_index=True, how="left")
all_barcode_counts.drop("replicate", axis=1, inplace=True)
# fix column names
all_barcode_counts.columns = pd.MultiIndex.from_tuples(
bc_cols, names=["MIP", "Copy"]
)
all_barcode_counts.fillna(0, inplace=True)
print("There are {} total samples.".format(all_barcode_counts.shape[0]))
all_barcode_counts.to_csv(os.path.join(wdir, "barcode_counts.csv"))
# Create an overview statistics file for samples including
# total read count, barcode count, and how well they cover each MIP.
sample_counts = combined_df.groupby("Sample ID")[["Read Count",
"Barcode Count"]].sum()
# Find samples without any data and print the number
no_data = run_meta.loc[
~run_meta["Sample ID"].isin(sample_counts.index)
]
print(("{} out of {} samples had no data and they will be excluded from "
"the variant calls.").format(no_data.shape[0], run_meta.shape[0]))
# add samples with no data
sample_counts = pd.merge(
run_meta[["Sample ID", "replicate"]].set_index("Sample ID"),
sample_counts, left_index=True, right_index=True, how="left")
sample_counts.drop("replicate", axis=1, inplace=True)
target_cov = pd.concat(
[(all_barcode_counts >= 1).sum(axis=1),
(all_barcode_counts >= 5).sum(axis=1),
(all_barcode_counts >= 10).sum(axis=1)],
axis=1,
).rename(
columns={
0: "targets_with_1_barcodes",
1: "targets_with_5_barcodes",
2: "targets_with_10_barcodes"
}
)
sample_counts = sample_counts.merge(target_cov,
how="outer",
left_index=True,
right_index=True).fillna(0)
target_cov_file = os.path.join(wdir, "sample_summary.csv")
sample_counts.to_csv(target_cov_file)
return
def freebayes_call(bam_dir="/opt/analysis/padded_bams",
fastq_dir="/opt/analysis/padded_fastqs",
options=[],
vcf_file="/opt/analysis/variants.vcf.gz",
targets_file=None, make_fastq=True,
align=True, settings=None, settings_file=None,
bam_files=None, bam_list=None, verbose=True,
fastq_padding=20, min_base_quality=1,
errors_file="/opt/analysis/freebayes_errors.txt",
warnings_file="/opt/analysis/freebayes_warnings.txt",
merge_distance=1000, contig_padding=500):
"""Call variants for MIP data using freebayes.
A mapped haplotype file must be present in the working directory. This
is generated during haplotype processing. Per sample fastqs and bams
will be created if align=True. Fastqs are generated with a default 20 bp
padding on each side of the haplotype. This assumes that there were no
errors where the MIP arms bind to the DNA. It may cause some false negative
calls where there was imperfect binding, but it is crucial for determining
variants close to the MIP arms.
Parameters
----------
bam_dir: str/path, /opt/analysis/padded_bams
path to the directory where per sample bam files are or where they
will be created if align=True.
fastq_dir: str/path, /opt/analysis/padded_fastqs
path to the directory where per sample fastq files are or where they
will be created if align=True.
vcf_file: str/path, /opt/analysis/variants.vcf.gz
Output vcf file path.
options: list, []
options to pass to freebayes directly, such as --min-coverage
the list must have each parameter and value as separate items.
For example, ["--min-alternate-count", "2"] and not
["--min-alternate-count 2"]
align: bool, True
Set to false if fastq and bam files have already been created.
settings: dict, None
Analysis settings dictionary. Either this or settings_file must
be provided.
settings_file: str/path, None
Path to the analysis settings file. Either this or the settings dict
must be provided.
targets_file: str/path, None
Path to targets file to force calls on certain locations even if
those variants do not satisfy filter criteria. It must be a tab
separated text file with minimum columns CHROM, POS, REF, ALT.
bam_files: list, None
list of bam files within the bam_dir to pass to freebayes. If None (
default), all bam files in the bam_dir will be used.
verbose: bool, True
if set to True, print errors and warnings in addition to saving to
errors and warnings files.
errors_file: str/path, /opt/analysis/freebayes_errors.txt
file to save freebayes errors.
    warnings_file: str/path, /opt/analysis/freebayes_warnings.txt
        file to save freebayes warnings.
    merge_distance: int, 1000
        When creating contigs from MIP target regions, merge targets closer
        to each other than this distance.
    contig_padding: int, 500
        Add this much padding to the contigs when calling freebayes.
"""
# get the analysis settings
# check if both settings and the settings file are None:
if (settings is None) and (settings_file is None):
print("settings or settings file must be provided for freebayes_call.")
return
else:
if settings is None:
settings = get_analysis_settings(settings_file)
else:
settings = copy.deepcopy(settings)
# get the working directory from settings
wdir = settings["workingDir"]
# load mapped haplotypes file. This file has the genomic locations
# of the haplotypes in mip data
mapped_haplotypes_file = os.path.join(wdir, "mapped_haplotypes.csv")
# get the mip data file location. This file has per sample haplotype
# information including counts.
mipster_file = os.path.join(wdir, settings["mipsterFile"])
if make_fastq:
# create fastq files from MIP data. One read per UMI will be created.
generate_mapped_fastqs(fastq_dir, mipster_file,
mapped_haplotypes_file, settings["species"],
pro=int(settings["processorNumber"]),
pad_size=fastq_padding)
if align:
# map per sample fastqs to the reference genome, creating bam files.
# bam files will have sample groups added, which is required for
# calling variants across the samples.
bwa_multi([], "bam", fastq_dir, bam_dir,
settings["bwaOptions"], settings["species"],
int(settings["processorNumber"]),
int(settings["processorNumber"]))
# divide data into contigs to make parallelization more efficient
# we'll create contigs from overlapping MIPs.
# load the call info dictionary which contains per MIP information
call_file = settings["callInfoDictionary"]
with open(call_file) as infile:
call_dict = json.load(infile)
# create a dataframe that has the genomic coordinates of each MIP
call_df = []
for g in call_dict:
for m in call_dict[g]:
for c in call_dict[g][m]["copies"]:
cdict = call_dict[g][m]["copies"][c]
call_df.append([cdict["chrom"], cdict["capture_start"],
cdict["capture_end"]])
call_df = pd.DataFrame(call_df, columns=["chrom", "capture_start",
"capture_end"])
    # create a function that merges overlapping MIP capture regions into
    # contigs, joining regions that are closer than "merge_distance" bases.
def get_contig(g):
intervals = zip(g["capture_start"], g["capture_end"])
return pd.DataFrame(merge_overlap(
[list(i) for i in intervals], spacer=merge_distance))
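    # e.g. with merge_distance=1000, capture intervals [100, 350] and
    # [900, 1100] on the same chromosome would be merged into a single
    # contig spanning [100, 1100], since the gap between them is smaller
    # than the merge distance.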
# create contigs per chromosome
contigs = call_df.groupby("chrom").apply(get_contig)
contigs = contigs.reset_index()
contigs.rename(columns={"level_1": "contig", 0: "contig_capture_start",
1: "contig_capture_end"}, inplace=True)
contigs["contig_name"] = contigs["chrom"] + "_" + contigs["contig"].astype(
str)
# we'll call freebayes on each contig by providing a region string in the
# form chrx:begin-end. Create those strings for each contig with some
# padding. It is important to check that we don't end up with a start
    # position of <1 or end position longer than chrom length.
# Begin by adding chromosome length to contig info.
# get reference chromosome lengths
genome_file = get_file_locations()[settings["species"]]["fasta_genome"]
reference_lengths = {}
genome_sam = pysam.FastaFile(genome_file)
for r in genome_sam.references:
reference_lengths[r] = genome_sam.get_reference_length(r)
contigs["chromosome_length"] = contigs["chrom"].map(reference_lengths)
contigs["region_start"] = contigs["contig_capture_start"] - contig_padding
contigs.loc[contigs["region_start"] < 1, "region_start"] = 1
contigs["region_end"] = contigs["contig_capture_end"] + contig_padding
contigs["region_end"] = contigs[
["region_end", "chromosome_length"]].min(axis=1).values
contigs["region"] = contigs["chrom"] + ":" + (
contigs["region_start"]).astype(str) + "-" + (
contigs["region_end"]).astype(str)
# we'll force calls on targeted variants if so specified
if targets_file is not None:
# each contig must include at least one of the targets, otherwise
# freebayes throws an error. So we'll load the targets and add the
# targets option to only those contigs that contain targets
targets = pd.read_table(targets_file)
# merge targets and contigs dataframes to determine which contigs
# contain targets. chrom will be used as the common column name
targets["chrom"] = targets["CHROM"]
targets = targets.merge(contigs)
# remove rows where chrom is shared but target position is outside
        # of contig boundaries.
targets = targets.loc[
(targets["contig_capture_start"] <= targets["POS"])
& (targets["POS"] <= targets["contig_capture_end"])]
targets["contains_targets"] = True
# merge only two columns of the targets df to contigs so that
# the only shared column is contig_name. More than one target can
# be in a single contig, so we need to drop duplicates from targets.
contigs = contigs.merge(targets[
["contig_name", "contains_targets"]].drop_duplicates(), how="left")
contigs["contains_targets"].fillna(False, inplace=True)
# create a targets.vcf file for freebayes
targets_vcf = os.path.join(wdir, "targets.vcf")
with open(targets_vcf, "w") as outfile:
outfile.write('##fileformat=VCFv4.2\n')
outfile.write(
'##FILTER=<ID=PASS,Description="All filters passed">\n')
outfile.write('##INFO=<ID=TR,Number=.,Type=String,Description'
'="Targeted variant.">\n')
vcf_fields = ["ID", "QUAL", "FILTER"]
for vf in vcf_fields:
targets[vf] = "."
targets["INFO"] = "TR"
vcf_fields = ["#CHROM", "POS", "ID", "REF", "ALT", "QUAL",
"FILTER", "INFO"]
targets = targets.rename(columns={"CHROM": "#CHROM"})[vcf_fields]
targets.sort_values(["#CHROM", "POS"]).to_csv(
outfile, sep="\t", index=False)
# bgzip and index
res = subprocess.run(["bgzip", "-f", targets_vcf],
stderr=subprocess.PIPE)
if res.returncode != 0:
print("Error in compressing targets.vcf file", res.stderr)
targets_vcf = targets_vcf + ".gz"
res = subprocess.run(["tabix", "-s", "1", "-b", "2", "-e", "2", "-f",
targets_vcf], stderr=subprocess.PIPE)
if res.returncode != 0:
print("Error in indexing targets.vcf.gz file ", res.stderr)
else:
contigs["contains_targets"] = False
# create a contig dictionary from the contigs dataframe
# this dict will be passed to the worker function for parallelization
chrom_dict = {}
gb = contigs.groupby("chrom")
for g in gb.groups.keys():
gr = gb.get_group(g)
chrom_dict[g] = gr[["contig_name", "region",
"contains_targets"]].set_index(
"contig_name").to_dict(orient="index")
# populate the contigs dictionary for freebayes parameters
# start with options to be added for each contig
# get fasta genome location
genome_fasta = get_file_locations()[settings["species"]]["fasta_genome"]
# specify fasta genome file
options.extend(["-f", genome_fasta])
    # add bam files if they are specified. Nothing should be added to options
# after the bam files.
if bam_files is not None:
options.extend(bam_files)
if bam_list is not None:
options.extend(["-L", bam_list])
# create a file list in the bam_dir that has full path to all bam files
# if all bam files are to be used
else:
bam_list = os.path.join(bam_dir, "bamlist.txt")
with open(bam_list, "w") as outfile:
for f in os.scandir(bam_dir):
if os.path.splitext(f.name)[1] == ".bam":
outfile.write(f.path + "\n")
options.extend(["-L", bam_list])
# add minimum base quality parameter to options if not already provided
if ("--min-base-quality" not in options) and ("-q" not in options):
options.extend(["-q", str(min_base_quality)])
    # create a list for keeping all contig vcf file paths to concatenate
# them at the end.
contig_vcf_paths = []
# create a similar list for zipped vcf files
contig_vcf_gz_paths = []
# create a list of per contig dictionary to feed to multiprocessing
# function apply_async
contig_dict_list = []
# create the contigs vcf directory
cvcfs_dir = os.path.join(wdir, "contig_vcfs")
if not os.path.exists(cvcfs_dir):
os.makedirs(cvcfs_dir)
# update contig_dict with contig specific options
for chrom in chrom_dict:
for contig_name in chrom_dict[chrom]:
contig_dict = chrom_dict[chrom][contig_name]
################################################################
# create contig specific options and
# add contigs region string (chrx:begin-end)
region = contig_dict["region"]
contig_options = ["-r", region]
# add contigs vcf file name
contig_vcf = os.path.join(wdir, "contig_vcfs",
contig_name + ".vcf")
contig_dict["vcf_path"] = contig_vcf
# add output file to the freebayes options
contig_options.extend(["-v", contig_vcf])
# add contig vcf path to the list
contig_vcf_paths.append(contig_vcf)
# add contigs vcf.gz file name
contig_vcf_gz = os.path.join(wdir, "contig_vcfs",
contig_name + ".vcf.gz")
contig_vcf_gz_paths.append(contig_vcf_gz)
contig_dict["vcf_gz_path"] = contig_vcf_gz
# if contig includes targets, we'll force calls on those
if contig_dict["contains_targets"]:
contig_options.extend(["-@", targets_vcf])
# we'll add the contig specific options to the beginning of
# the options list in case bam files were added to the options
# and they must stay at the end because they are positional args.
contig_dict["options"] = contig_options + options
# add the contig dict to contig dict list
contig_dict_list.append(contig_dict)
# create a processor pool for parallel processing
pool = Pool(int(settings["processorNumber"]))
# create a results container for the return values from the worker function
results = []
errors = []
# run the freebayes worker program in parallel
pool.map_async(freebayes_worker, contig_dict_list, callback=results.extend,
error_callback=errors.extend)
# join and close the processor pool.
pool.close()
pool.join()
# compare the length of the results object and the number of contigs
# print an error message if they are not the same
if len(contig_dict_list) != (len(results) + len(errors)):
print(("Number of contigs, {}, is not the same as number of results "
"from the variant caller, {}, plus number of errors, {}. "
"This means some calls have failed silently. "
"Results and errors should be inspected.").format(
len(contig_dict_list), len(results), len(errors)))
# check each contig's variant call results for errors and warnings
# open files to save errors and warnings
with open(errors_file, "w") as ef, open(warnings_file, "wb") as wf:
        # keep a count of warnings and errors
error_count = 0
warning_count = 0
for res in results:
for r in res:
try:
r.check_returncode()
except subprocess.CalledProcessError as e:
error_count += 1
ef.write(str(e) + "\n")
if verbose:
print("Error in freebayes calls: ", e)
# print if any warnings were issued
if len(r.stderr) > 0:
warning_count += 1
wf.write(r.stderr + b"\n")
if verbose:
print("Warning in freebayes calls: ", r.stderr)
    # if errors are not printed but present, print a message to indicate
# the presence of errors/warnings
if not verbose:
if error_count > 0:
print(("Errors were encountered in freebayes calls."
" Please inspect {} for errors.").format(errors_file))
if warning_count > 0:
print(("There were warnings in freebayes calls."
" Please inspect {} for warnings.").format(
warnings_file))
if len(errors) > 0:
print(("There were {} calls that failed").format(len(errors)))
    # concatenate contig vcfs. The number of contigs may be high, so we'll
# write the vcf paths to a file and bcftools will read from that file
cvcf_paths_file = os.path.join(wdir, "contig_vcfs", "vcf_file_list.txt")
with open(cvcf_paths_file, "w") as outfile:
outfile.write("\n".join(contig_vcf_gz_paths) + "\n")
subprocess.run(["bcftools", "concat", "-f", cvcf_paths_file, "-Oz",
"-o", vcf_file], check=True)
subprocess.run(["bcftools", "index", "-f", vcf_file], check=True)
# fix vcf header if --gvcf option has been used
if "--gvcf" in options:
temp_vcf_path = os.path.join(wdir, "temp.vcf.gz")
vcf_reheader(os.path.basename(vcf_file), temp_vcf_path, wdir=wdir)
old_vcf_path = os.path.join(wdir, "unfixed.vcf.gz")
subprocess.run(["mv", vcf_file, old_vcf_path])
subprocess.run(["mv", temp_vcf_path, vcf_file])
subprocess.run(["bcftools", "index", "-f", vcf_file], check=True)
return (contig_dict_list, results, errors)
def freebayes_worker(contig_dict):
"""Run freebayes program with the specified options.
    Run the freebayes program with the specified options and return a tuple
    of subprocess.CompletedProcess objects (freebayes, and, when successful,
    the bgzip and index steps).
"""
options = contig_dict["options"]
command = ["freebayes"]
command.extend(options)
# run freebayes command piping the output
fres = subprocess.run(command, stderr=subprocess.PIPE)
    # check the return code of the freebayes run. If successful, continue
if fres.returncode == 0:
# bgzip the vcf output, using the freebayes output as bgzip input
vcf_path = contig_dict["vcf_path"]
gres = subprocess.run(["bgzip", "-f", vcf_path],
stderr=subprocess.PIPE)
        # make sure the bgzip process completed successfully
if gres.returncode == 0:
# index the vcf.gz file
vcf_gz_path = contig_dict["vcf_gz_path"]
ires = subprocess.run(["bcftools", "index", "-f", vcf_gz_path],
stderr=subprocess.PIPE)
# return the CompletedProcess objects
return (fres, gres, ires)
else:
return (fres, gres)
# if freebayes call failed, return the completed process object
# instead of attempting to zip the vcf file which does not exist if
# freebayes failed.
else:
return (fres, )
def vcf_reheader(vcf_file, fixed_vcf_file, wdir="/opt/analysis/"):
"""Fix vcf header QA/QR fields.
    When the --gvcf option is used in the freebayes variant calling pipeline,
    the resulting vcf header declares the QA/QR fields with the wrong Type,
    Integer instead of Float. This function fixes those lines from
the header and creates a new vcf file with the correct header.
"""
# get the current header
vcf_path = os.path.join(wdir, vcf_file)
header = subprocess.Popen(["bcftools", "view", "-h", vcf_path],
stderr=subprocess.PIPE, stdout=subprocess.PIPE)
com = header.communicate()
if header.returncode != 0:
print("Failed to extract vcf header. Header will not be fixed.")
return
    # convert the header byte string to text and create a list of lines
head = com[0].decode("utf-8").split("\n")
# create a new header with fixed fields
new_head = []
for line in head:
if ("ID=QA" in line) or ("ID=QR" in line):
new_head.append(line.replace("Type=Integer", "Type=Float"))
else:
new_head.append(line)
new_header_path = os.path.join(wdir, "new_vcf_header.txt")
with open(new_header_path, "w") as outfile:
outfile.write("\n".join(new_head) + "\n")
fixed_vcf_path = os.path.join(wdir, fixed_vcf_file)
subprocess.run(["bcftools", "reheader", "-h", new_header_path,
vcf_path, "-o", fixed_vcf_path], check=True)
return
def gatk(options):
"""GATK wrapper function.
Run gatk program with the given options. Return the subprocess result.
"""
return subprocess.run(["gatk", *options], stderr=subprocess.PIPE)
def gatk_file_prep(bam_dir="/opt/analysis/padded_bams",
fastq_dir="/opt/analysis/padded_fastqs",
targets_file=None,
settings=None, settings_file=None,
errors_file="/opt/analysis/gatk_file_prep_output.txt"):
"""Prepare files for calling variants for MIP data using gatk.
A mapped haplotype file must be present in the working directory. This
is generated during haplotype processing. Per sample fastqs and bams
will be created. Fastqs are generated with a default 20 bp
padding on each side of the haplotype. This assumes that there were no
errors where the MIP arms bind to the DNA. It may cause some false negative
calls where there was imperfect binding, but it is crucial for determining
variants close to the MIP arms.
Parameters
----------
bam_dir: str/path, /opt/analysis/padded_bams
path to the directory where per sample bam files are or where they
will be created if align=True.
fastq_dir: str/path, /opt/analysis/padded_fastqs
path to the directory where per sample fastq files are or where they
will be created if align=True.
settings: dict, None
Analysis settings dictionary. Either this or settings_file must
be provided.
settings_file: str/path, None
Path to the analysis settings file. Either this or the settings dict
must be provided.
targets_file: str/path, None
Path to targets file to force calls on certain locations even if
those variants do not satisfy filter criteria. It must be a tab
separated text file with minimum columns CHROM, POS, REF, ALT.
    errors_file: str/path, /opt/analysis/gatk_file_prep_output.txt
        file to save gatk file preparation errors.
"""
# get the analysis settings
# check if both settings and the settings file are None:
if (settings is None) and (settings_file is None):
print("settings or settings file must be provided for freebayes_call.")
return
else:
if settings is None:
settings = get_analysis_settings(settings_file)
else:
settings = copy.deepcopy(settings)
# get the working directory from settings
wdir = settings["workingDir"]
# load mapped haplotypes file. This file has the genomic locations
# of the haplotypes in mip data
mapped_haplotypes_file = os.path.join(wdir, "mapped_haplotypes.csv")
# get the mip data file location. This file has per sample haplotype
# information including counts.
mipster_file = os.path.join(wdir, settings["mipsterFile"])
# create fastq files from MIP data. One read per UMI will be created.
generate_mapped_fastqs(fastq_dir, mipster_file,
mapped_haplotypes_file, settings["species"],
pro=int(settings["processorNumber"]))
# if there is a targets file provided, we'll create a hypothetical
# sample that has all of the targeted variants. This way, a variant site
# for each target will be created in the final vcf file even if a
# variant was not observed in the data.
if targets_file is not None:
# load the targets as dataframe converting field names to
# field names in a haplotypes file.
targets = pd.read_table(targets_file).rename(
columns={"CHROM": "Chrom", "POS": "capture_start",
"ALT": "haplotype_sequence",
"mutation_name": "haplotype_ID"})
# fill in orientation and copy number information for all targets.
targets["orientation"] = "forward"
targets["mapped_copy_number"] = 1
targets["capture_end"] = (targets["capture_start"]
+ targets["REF"].apply(len) - 1)
# create a haplotype file for the targeted mutations
haplotype_fields = ['capture_end', 'capture_start', 'Chrom',
'orientation', 'haplotype_ID',
'haplotype_sequence', 'mapped_copy_number']
mutant_haplotypes = "/opt/analysis/mutant_haplotypes.csv"
targets[haplotype_fields].to_csv(mutant_haplotypes, index=False)
# create a hypothetical sample that has all mutations and a
# corresponding mip data file that shows a UMI count of 20
# for each observation
targets["sample_name"] = "control_mutant"
targets["sequence_quality"] = targets["haplotype_sequence"].apply(
lambda a: "".join(["H" for i in range(len(a))]))
targets["barcode_count"] = 20
data_fields = ["sample_name", 'haplotype_ID', "haplotype_sequence",
'sequence_quality', 'barcode_count']
mutant_data_file = "/opt/analysis/mutant_data.tsv"
targets[data_fields].to_csv(mutant_data_file, index=False, sep="\t")
# create a fastq file for the "control_mutant" sample
padding = 100
generate_mapped_fastqs(fastq_dir, mutant_data_file,
mutant_haplotypes, settings["species"],
pro=int(settings["processorNumber"]),
pad_size=padding)
# map per sample fastqs to the reference genome, creating bam files.
# bam files will have sample groups added, which is required for
# calling variants across the samples.
bwa_multi([], "bam", fastq_dir, bam_dir,
settings["bwaOptions"], settings["species"],
int(settings["processorNumber"]),
int(settings["processorNumber"]))
# create an intervals file to be used in gatk call
intervals_bed = "/opt/analysis/intervals.bed"
call_file = settings["callInfoDictionary"]
with open(call_file) as infile:
call_dict = json.load(infile)
# create a dataframe that has the genomic coordinates of each MIP
probe_info = []
for g in call_dict:
for m in call_dict[g]:
for c in call_dict[g][m]["copies"]:
cdict = call_dict[g][m]["copies"][c]
probe_info.append([cdict["chrom"], cdict["capture_start"],
cdict["capture_end"]])
probe_info = pd.DataFrame(probe_info, columns=["chrom", "capture_start",
"capture_end"])
probe_info["bed_start"] = probe_info["capture_start"] - 200
probe_info["bed_end"] = probe_info["capture_end"] + 200
probe_info[["chrom", "bed_start", "bed_end"]].to_csv(
        intervals_bed, index=False, header=False, sep="\t")
intervals_list = "/opt/analysis/intervals.list"
genome_dict = get_file_locations()[settings["species"]]["genome_dict"]
interval_call = gatk(["BedToIntervalList", "-I", intervals_bed,
"-O", intervals_list, "-SD", genome_dict])
# check the return code and if not 0 print warning
if interval_call.returncode != 0:
print(("An error ocurred when creating the intervals list. "
"Please see the {} for details.").format(errors_file))
# save command output
with open(errors_file, "ab") as outfile:
outfile.write(interval_call.stderr)
def gatk_haplotype_caller(
options, bam_dir, settings,
errors_file="/opt/analysis/gatk_haplotype_caller_output.txt"):
genome_fasta = get_file_locations()[settings["species"]]["fasta_genome"]
intervals_list = "/opt/analysis/intervals.list"
haplotype_caller_opts = ["HaplotypeCaller", "-R", genome_fasta,
"--native-pair-hmm-threads", "1",
"-L", intervals_list] + options
# scan the bam directory and get file paths. Assign an output name
# for each file (gvcf output)
bam_files = []
for f in os.scandir(bam_dir):
if os.path.splitext(f.name)[1] == ".bam":
base_name = os.path.splitext(f.name)[0]
gvcf = os.path.join(bam_dir, base_name + ".g.vcf.gz")
bam_files.append([f.path, gvcf])
pool = NoDaemonProcessPool(int(settings["processorNumber"]))
results = []
errors = []
for bam in bam_files:
io_options = ["-I", bam[0], "-O", bam[1]]
pool.apply_async(gatk, (haplotype_caller_opts + io_options, ),
callback=results.append, error_callback=errors.append)
pool.close()
pool.join()
if len(errors) > 0:
print(("An error ocurred during haplotype calling . "
"Please see the {} for details.").format(errors_file))
# save command output
with open(errors_file, "ab") as outfile:
for e in errors:
                outfile.write(str(e).encode("utf-8"))
for r in results:
if r.returncode != 0:
print(("An error ocurred when creating the intervals list. "
"Please see the {} for details.").format(errors_file))
# save command output
with open(errors_file, "ab") as outfile:
outfile.write(r.stderr)
return
def genotype_gvcfs(settings, bam_dir, options, gdb, vcf_file,
sample_map=None, keep_control_mutant=False,
errors_file="/opt/analysis/gatk_genotype_gvcfs_output.txt"):
if sample_map is None:
# scan the bam directory and get file paths. Assign an output name
# for each file (gvcf output)
bam_files = []
for f in os.scandir(bam_dir):
if os.path.splitext(f.name)[1] == ".bam":
base_name = os.path.splitext(f.name)[0]
gvcf = os.path.join(bam_dir, base_name + ".g.vcf.gz")
bam_files.append([f.path, gvcf])
sample_map = os.path.join(settings["workingDir"], "sample_map.txt")
with open(sample_map, "w") as outfile:
for f in bam_files:
sample_name = ".".join(os.path.basename(f[0]).split(".")[:-2])
outfile.write(sample_name + "\t" + f[1] + "\n")
intervals_list = "/opt/analysis/intervals.list"
gdb_path = os.path.join("/opt/analysis/", gdb)
gdb_import = ["--java-options", "-Xmx32G", "GenomicsDBImport",
"--genomicsdb-workspace-path", gdb_path,
"--sample-name-map", sample_map,
"-L", intervals_list,
"--max-num-intervals-to-import-in-parallel",
settings["processorNumber"]]
gdb_result = gatk(gdb_import)
if gdb_result.returncode != 0:
print(("An error ocurred when during genomics DB import. "
"Please see the {} for details.").format(errors_file))
# save command output
with open(errors_file, "ab") as outfile:
outfile.write(gdb_result.stderr)
# genotype gvcfs
genome_fasta = get_file_locations()[settings["species"]][
"fasta_genome"]
gdb = "gendb://" + gdb
if keep_control_mutant:
temp_vcf_file = vcf_file
else:
temp_vcf_file = "/opt/analysis/temp.vcf.gz"
genotype_gvcfs = ["GenotypeGVCFs", "-R", genome_fasta,
"-V", gdb, "-O", temp_vcf_file, "-L", intervals_list]
genotypes = gatk(genotype_gvcfs + options)
if genotypes.returncode != 0:
print(("An error ocurred during genotyping GVCFs. "
"Please see the {} for details.").format(errors_file))
# save command output
with open(errors_file, "ab") as outfile:
outfile.write(genotypes.stderr)
# remove control mutant sample if requested
if not keep_control_mutant:
res = subprocess.run(["bcftools", "view", "-s^control_mutant",
"-Oz", "-o", vcf_file, temp_vcf_file,
"--force-samples"],
stderr=subprocess.PIPE)
if res.returncode != 0:
print(("An error ocurred while removing control mutant. "
"Please see the {} for details.").format(errors_file))
# save command output
with open(errors_file, "ab") as outfile:
outfile.write(res.stderr)
# index the final vcf file
res = subprocess.run(["bcftools", "index", "-f", vcf_file],
stderr=subprocess.PIPE)
if res.returncode != 0:
print(("An error ocurred while indexing the final vcf file. "
"Please see the {} for details.").format(errors_file))
# save command output
with open(errors_file, "ab") as outfile:
outfile.write(res.stderr)
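# A rough sketch of how the gatk based steps above are expected to chain
# together; the settings path, targets file, extra options and workspace name
# are placeholders, not tested values.
# settings = get_analysis_settings("/opt/analysis/settings.txt")
# gatk_file_prep(settings=settings,
#                targets_file="/opt/project_resources/targets.tsv")
# gatk_haplotype_caller(["-ERC", "GVCF"], "/opt/analysis/padded_bams", settings)
# genotype_gvcfs(settings, "/opt/analysis/padded_bams", [],
#                "genomicsdb_workspace", "/opt/analysis/variants.vcf.gz")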
def vcf_to_tables_fb(vcf_file, settings=None, settings_file=None,
annotate=True, geneid_to_genename=None,
target_aa_annotation=None, aggregate_aminoacids=False,
target_nt_annotation=None, aggregate_nucleotides=False,
decompose_options=[], annotated_vcf=False,
aggregate_none=False, min_site_qual=-1,
min_target_site_qual=-1, min_genotype_qual=-1,
min_alt_qual=-1, min_ref_qual=-1, min_mean_alt_qual=-1,
min_mean_ref_qual=-1, output_prefix=""):
"""Create various tables from a vcf file.
Create various tables from a vcf file generated by the freebayes
program. There are 3 different types of count output for each variant:
variant count, reference count and coverage. The vcf file will be split
into biallelic variants. Table versions of the input vcf will be created
but the info fields will be limited to the mandatory vcf fields and some
    annotation data if available.
In addition to the original vcf table, aa change tables can be generated.
These will be generated by filtering the vcf to missense variants only,
decomposing block substitutions (haplotypes) and combining the counts for
the same aminoacid changes. This operation is specifically intended for
generating data for targeted missense mutations and only reports that. All
other variants, even those complex variants including targeted variants
will not be reported. Finally, one specific mutation (dhps-437) will have
reference counts instead of variant counts if present. This is because this
drug resistance variant is encoded by the 3d7 reference sequence.
Parameters
----------
settings: dict, None
Analysis settings dictionary. Either this or settings_file must
be provided.
settings_file: str/path, None
Path to the analysis settings file. Either this or the settings dict
must be provided.
annotate: bool, True
Annotate variant file. This is required for protein level analysis.
vcf_file: str/path
Starting vcf file.
    geneid_to_genename: str/path, None.
        Path to a tab separated text file that maps gene ids to gene names.
Column names must be gene_id and gene_name. Gene IDs
will populate the Gene field if this file is not provided.
target_aa_annotation: str/path, None.
Path to a tab separated text file with targeted variant information to
annotate and label targeted amino acid changes.
It must have gene_name, aminoacid_change, and mutation_name columns.
Amino acid changes should be represented as refAAPosAltAA. refAA and
AltAA must be three letter amino acid codes.
This file is required for targeted protein variant labeling.
target_nt_annotation: str/path, None.
Path to a tab separated text file with targeted variant information to
annotate and label targeted nucleotide changes.
It must have CHROM, POS, REF, ALT, NAME columns.
This file is required for targeted nucleotide variant labeling.
aggregate_aminoacids: bool, False
whether counts for same amino acids should be aggregated. This involves
decomposing multi amino acid changes for missense variants. If amino
acid based targets will be annotated, based on a provided annotation
dictionary, aggregation step must be completed. Targeted mutations
that are part of complex events (indels, stop loss/gain etc.) will not
be labeled as targeted.
aggregate_nucleotides: bool, False
whether the counts for nucleotide changes should be aggregated. This
involves decomposing all variants to the smallest units possible,
breaking all haplotype data. The level of decomposition should be
specified with the decompose_options parameter.
aggregate_none: bool, False.
Do no aggregation on counts, save the original (annotated if requested)
vcf file as 3 count tables. Three aggregation options are compatible
with each other and can be used all at once.
decompose_options: list, []
if aggregate nucleotides option is selected, these options will be
passed to vt program. "-a" for decomposing variants containing indels,
for example. "-p" for keeping phase information. Any option to vt
decompose_blocksub would be valid. By default indels will not be
decomposed.
annotated_vcf: bool, False
is the provided vcf file annotated using snpEff. These annotations
will be used if no count aggregation is to be done and annotate option
is False.
min_site_qual: float, -1
Filter variants with QUAL values less than this value if the site is
not a targeted site. If targeted, the site will be kept regardless of
the qual value for the site. freebayes manual indicates that
simulations showed a value between 1-30 would be good. So a minimum
value of 1 here would clean up most junk sites.
min_target_site_qual: float, -1
If a variant site is targeted but the site qual is lower than this,
reset the alternate observation counts to 0. It may be best to leave
this at the default value since there is usually additional evidence
        that a targeted variant exists in a sample compared to a de novo
variant.
"""
# get the analysis settings
# check if both settings and the settings file are None:
if (settings is None) and (settings_file is None):
print("settings or settings file must be provided for freebayes_call.")
return
else:
if settings is None:
settings = get_analysis_settings(settings_file)
else:
settings = copy.deepcopy(settings)
# get the working directory from settings
wdir = settings["workingDir"]
# All postprocessing steps require biallelic variant representation.
# so we'll use bcftools to split multiallelics to their own lines.
genome_fasta = get_file_locations()[settings["species"]]["fasta_genome"]
vcf_path = os.path.join(wdir, vcf_file)
split_vcf_path = os.path.join(wdir, output_prefix + "split." + vcf_file)
subprocess.run(["bcftools", "norm", "-f", genome_fasta, "-m-both",
vcf_path, "-Oz", "-o", split_vcf_path], check=True,
stderr=subprocess.PIPE)
subprocess.run(["bcftools", "index", "-f", split_vcf_path], check=True,
stderr=subprocess.PIPE)
# Will protein level aggregation be performed on the variants?
# This will only be done for simple missense variants but it is important
# to annotate the vcf file before breaking down the haplotypes.
if annotate:
annotated_vcf_path = os.path.join(wdir, output_prefix + "split.ann."
+ vcf_file)
res = annotate_vcf_file(settings, split_vcf_path, annotated_vcf_path)
if res != 0:
print("Annotating the vcf file failed.")
return
else:
annotated_vcf_path = split_vcf_path
if aggregate_aminoacids:
if not (annotate or annotated_vcf):
print("annotate option must be set to true or an annotadet vcf "
"file must be provided and annotated_vcf option must be "
"set to true for amino acid level aggregation. \n"
"Exiting!")
return
# check if a target annotation dict is provided.
target_annotation_dict = {}
if target_aa_annotation is not None:
taa = pd.read_table(target_aa_annotation).set_index(
["gene_name", "aminoacid_change"]).to_dict(orient="index")
for k in taa.keys():
target_annotation_dict[k] = taa[k]["mutation_name"]
# check if a gene id to gene name file is provided
gene_ids = {}
if geneid_to_genename is not None:
gids = pd.read_table(geneid_to_genename).set_index("gene_id")
gids = gids.to_dict(orient="index")
for g in gids:
gene_ids[g] = gids[g]["gene_name"]
# load annotated vcf file
variants = allel.read_vcf(annotated_vcf_path, fields=["*"],
alt_number=1,
transformers=allel.ANNTransformer())
# allel import provides a variants dictionary with keys such as
# variants/AD, variants/POS for variant level information
# the values are arrays with each element corresponding to one variant.
# similarly, calldata/GT type keys hold the genotype level data.
#############################################################
# Freebayes vcfs have AO and RO counts for alt and ref allele depths
# but GATK has a combined AD depth. Create AO and RO from AD if
# needed
try:
variants["calldata/AO"]
except KeyError:
variants["calldata/RO"] = variants["calldata/AD"][:, :, 0]
variants["calldata/AO"] = variants["calldata/AD"][:, :, 1]
# find missense variant locations in the data. We are going to split
# multi amino acid changes for missense variants only for target
# annotation and count aggregation.
missense = ["missense_variant" == variant for variant
in variants["variants/ANN_Annotation"]]
        # specify fields of interest from the INFO fields
variant_fields = ["ANN_Gene_ID", "ANN_HGVS_p", "ANN_Annotation",
"QUAL"]
variant_fields = ["variants/" + v for v in variant_fields]
# specify fields of interest from individual level data
# that is basically the count data for tables. AO: alt allele count,
# RO ref count, DP: coverage.
call_data_fields = ['calldata/AO', 'calldata/RO', 'calldata/DP',
'calldata/GT', 'calldata/GQ', 'calldata/QA',
'calldata/QR']
variants["calldata/GT"] = variants["calldata/GT"].sum(axis=2)
# zip variant level information together, so we have a single value
# for each variant
variant_data = list(zip(*[variants[v] for v in variant_fields]))
# so now we have a list of length equal to variant number.
# each item is a tuple such as ('PF3D7_0104300', 'Gln107Leu') or
# ('PF3D7_0104300', 'AspGluAsp144HisGlnTyr'). We'll split these
# compound SNVs later.
# get count data for missense variants
call_data = list(zip(*[variants[c] for c in call_data_fields]))
# first item of the above list is alt counts, then ref counts and
# coverage.
#############################
# split the compound mutations
split_variants = []
split_calls = []
for i in range(len(missense)):
mv = variant_data[i][:3]
# get the aa change such as AspGluAsp144HisGlnTyr
aa_change = mv[1]
# if no aa change, skip
if aa_change == "":
continue
try:
# if a mapping dict is present, add the gene name
# this would get Pfubp1 from PF3D7_0104300, for example
gene_name = gene_ids[mv[0]]
except KeyError:
gene_name = mv[0]
# get site quality, remove those not satisfying min_site_qual
# unless they are targeted mutations
site_qual = float(variant_data[i][3])
if missense[i]:
# get the position of the change (144 above)
aa_pos = int("".join([c for c in aa_change if c.isdigit()]))
# split the aa change to reference aminoacid sequence and
# alt amino acid sequence.
aa_split = aa_change.split(str(aa_pos))
reference = aa_split[0]
alternate = aa_split[1]
# aa changes are in 3 letter format. Loop through each aa and
# split to single aa changes.
for j in range(0, len(reference), 3):
new_pos = int(aa_pos + j/3)
# convert single amino acid names to 1 letter code.
new_reference = reference[j:j+3]
new_alternate = alternate[j:j+3]
new_change = new_reference + str(new_pos) + new_alternate
try:
# if this variant is in the targets, annotate it so.
mut_name = target_annotation_dict[
(gene_name, new_change)]
targeted_mutation = "Yes"
# reset alt observation counts to 0 if quality is low
if site_qual < min_target_site_qual:
call_data[i][0][:] = 0
except KeyError:
# remove low quality non-target alleles as well as
# synonymous changes
if ((site_qual < min_site_qual)
or (new_reference == new_alternate)):
continue
mut_name = gene_name + "-" + new_change
targeted_mutation = "No"
# add the split variant information split variants list
split_variants.append(mv + (new_change, gene_name,
mut_name, targeted_mutation))
# add the individual level data to split calls list.
split_calls.append(call_data[i])
else:
try:
# if this variant is in the targets, annotate it as such.
mut_name = target_annotation_dict[
(gene_name, aa_change)]
targeted_mutation = "Yes"
if site_qual < min_target_site_qual:
call_data[i][0][:] = 0
except KeyError:
# remove low qual or synonymous changes
if ((site_qual < min_site_qual)
or (mv[2] == "synonymous_variant")):
continue
mut_name = gene_name + "-" + aa_change
targeted_mutation = "No"
# add compound variant data to split variant data
split_variants.append(mv + (aa_change, gene_name,
mut_name, targeted_mutation))
# add the individual level data to split calls list.
split_calls.append(call_data[i])
# get individual level data
genotype_quals = call_data[i][4]
ao_count = call_data[i][0]
alt_quals = call_data[i][5]
average_alt_quals = alt_quals / ao_count
ro_count = call_data[i][1]
ref_quals = call_data[i][6]
average_ref_quals = ref_quals / ro_count
gq_mask = genotype_quals < min_genotype_qual
qa_mask = alt_quals < min_alt_qual
qr_mask = ref_quals < min_ref_qual
av_qa_mask = average_alt_quals < min_mean_alt_qual
av_qr_mask = average_ref_quals < min_mean_ref_qual
# replace count data for individuals failing quality thresholds
# alt allele count AO
call_data[i][0][qa_mask] = 0
call_data[i][0][av_qa_mask] = 0
# ref allele count RO
call_data[i][1][qr_mask] = 0
call_data[i][1][av_qr_mask] = 0
# reset coverage for gq failure
call_data[i][2][gq_mask] = 0
# reset genotypes for gq failure
call_data[i][3][gq_mask] = -2
# create a multiindex for the variant df that we'll create next
index = pd.MultiIndex.from_tuples(
split_variants, names=["Gene ID", "Compound Change", "ExonicFunc",
"AA Change", "Gene", "Mutation Name",
"Targeted"])
# get alt counts
variant_counts = pd.DataFrame(np.array(split_calls)[:, 0],
columns=variants["samples"],
index=index).replace(-1, 0)
# get reference counts
reference_counts = pd.DataFrame(np.array(split_calls)[:, 1],
columns=variants["samples"],
index=index).replace(-1, 0)
# get coverage depth
coverage = pd.DataFrame(np.array(split_calls)[:, 2],
columns=variants["samples"],
index=index).replace(-1, 0)
# combine counts for same changes
grouping_keys = ["Gene ID", "Gene", "Mutation Name", "ExonicFunc",
"AA Change", "Targeted"]
# replace -1 (allel assigned NA values) values with 0
# sum alt counts
mutation_counts = variant_counts.groupby(grouping_keys).sum()
        # take the min of ref counts
mutation_refs = reference_counts.groupby(grouping_keys).min()
# take the max of coverage counts
mutation_coverage = coverage.groupby(grouping_keys).max()
# due to aggregating aa changes, ref counts can be overcounted even
# if the minimum ref count is taken for the aggregate. The reason for
# this is that each nucleotide variant's reference observation count
# may include the alternate alleles for another nucleotide variant
# that codes for the same aa change. So we'll set the ref counts
# to coverage - alt count where ref count exceeds this value
diff_count = mutation_coverage - mutation_counts
ref_difference = (mutation_refs > diff_count).sum()
# get the variant indices where ref count exceeds coverage - alt count
exceed_index = ref_difference.loc[ref_difference > 0].index
mutation_refs.loc[:, exceed_index] = diff_count.loc[:, exceed_index]
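        # Illustrative example with made-up numbers: if coverage for a sample
        # is 100, the aggregated alt count is 60 and the aggregated ref count
        # came out as 55, the ref count is capped at 100 - 60 = 40.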
# get genotypes as called by the variant caller
gt_calls = pd.DataFrame((np.array(split_calls)[:, 3]),
columns=variants["samples"],
index=index)
def combine_gt(g):
if 1 in g.values:
return 1
elif 0 in g.values:
if 2 in g.values:
return 1
else:
return 0
elif 2 in g.values:
return 2
else:
return -1
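        # The convention assumed here: genotypes were summed over both alleles
        # above, so 0 is hom-ref, 1 is het and 2 is hom-alt, with negative
        # values marking missing calls. When rows for the same amino acid
        # change are grouped, a mixture of 0 and 2 is reported as het (1) and
        # a group containing only missing values is reported as -1.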
gt_calls = gt_calls.groupby(grouping_keys).agg(combine_gt)
# for one pf mutation alt count will be replaced with ref count
# because reference allele is drug resistant
dhps_key = ("<KEY>", "dhps", "dhps-Gly437Ala",
"missense_variant", "Gly437Ala", "Yes")
dhps_new_key = ("<KEY>", "dhps", "dhps-Ala437Gly",
"missense_variant", "Ala437Gly", "Yes")
try:
mutation_counts.loc[dhps_new_key, :] = mutation_refs.loc[
dhps_key, :]
mutation_refs.loc[dhps_new_key, :] = mutation_counts.loc[
dhps_key, :]
mutation_coverage.loc[dhps_new_key, :] = mutation_coverage.loc[
dhps_key, :]
gt_calls.loc[dhps_new_key, :] = gt_calls.loc[
dhps_key, :].replace({2: 0, 0: 2})
gt_calls.drop(dhps_key, inplace=True)
mutation_counts.drop(dhps_key, inplace=True)
mutation_refs.drop(dhps_key, inplace=True)
mutation_coverage.drop(dhps_key, inplace=True)
mutation_counts = mutation_counts.sort_index()
mutation_refs = mutation_refs.sort_index()
mutation_coverage = mutation_coverage.sort_index()
gt_calls = gt_calls.sort_index()
except KeyError:
pass
# save count tables
mutation_counts.T.to_csv(os.path.join(wdir, output_prefix
+ "alternate_AA_table.csv"))
mutation_refs.T.to_csv(os.path.join(wdir, output_prefix
+ "reference_AA_table.csv"))
mutation_coverage.T.to_csv(os.path.join(wdir, output_prefix
+ "coverage_AA_table.csv"))
gt_calls.T.to_csv(os.path.join(wdir, output_prefix
+ "genotypes_AA_table.csv"))
if aggregate_nucleotides:
# aggregating counts of nucleotides requires decomposing block
# substitutions, at a minimum. If desired, complex variants involving
# indels can be decomposed as well.
decomposed_vcf = os.path.join(wdir, output_prefix
+ "decomposed." + vcf_file)
# prepare vt decompose command
comm = ["vt", "decompose_blocksub"] + decompose_options
comm.append(split_vcf_path)
comm.extend(["-o", decomposed_vcf])
# run decompose
subprocess.run(comm, check=True)
subprocess.run(["bcftools", "index", "-f", decomposed_vcf], check=True)
# load decomposed vcf file
variants = allel.read_vcf(decomposed_vcf, fields=["*"], alt_number=1)
# Freebayes vcfs have AO and RO counts for alt and ref allele depths
# but GATK has a combined AD depth. Create AO and RO from AD if
# needed
try:
variants["calldata/AO"]
except KeyError:
variants["calldata/RO"] = variants["calldata/AD"][:, :, 0]
variants["calldata/AO"] = variants["calldata/AD"][:, :, 1]
        # specify fields of interest from the INFO fields
variant_fields = ["CHROM", "POS", "REF", "ALT", "QUAL"]
variant_fields = ["variants/" + v for v in variant_fields]
# specify fields of interest from individual level data
# that is basically the count data for tables. AO: alt allele count,
# RO ref count, DP: coverage.
call_data_fields = ['calldata/AO', 'calldata/RO', 'calldata/DP',
'calldata/GT', 'calldata/GQ', 'calldata/QA',
'calldata/QR']
variants["calldata/GT"] = variants["calldata/GT"].sum(axis=2)
# zip variant level information together, so we have a single value
# for each variant
variant_data = list(zip(*[variants[v] for v in variant_fields]))
# get count data for the variants
call_data = list(zip(*[variants[c] for c in call_data_fields]))
# check if a target annotation dict is provided.
target_annotation_dict = {}
if target_nt_annotation is not None:
taa = pd.read_table(target_nt_annotation).set_index(
["CHROM", "POS", "REF", "ALT"]).to_dict(orient="index")
for k in taa.keys():
target_annotation_dict[k] = taa[k]["mutation_name"]
grouping_keys = ["CHROM", "POS", "REF", "ALT", "Mutation Name",
"Targeted"]
split_variants = []
split_calls = []
for i in range(len(variant_data)):
vd = variant_data[i][:4]
site_qual = float(variant_data[i][4])
try:
t_anno = target_annotation_dict[vd]
targeted_mutation = "Yes"
if site_qual < min_target_site_qual:
call_data[i][0][:] = 0
except KeyError:
# remove low qual and nonvariant sites
if ((site_qual < min_site_qual) or (vd[2] == vd[3])):
continue
t_anno = ":".join(map(str, vd))
targeted_mutation = "No"
split_variants.append(vd + (t_anno, targeted_mutation))
split_calls.append(call_data[i])
# get individual level data
genotype_quals = call_data[i][4]
ao_count = call_data[i][0]
alt_quals = call_data[i][5]
average_alt_quals = alt_quals / ao_count
ro_count = call_data[i][1]
ref_quals = call_data[i][6]
average_ref_quals = ref_quals / ro_count
gq_mask = genotype_quals < min_genotype_qual
qa_mask = alt_quals < min_alt_qual
qr_mask = ref_quals < min_ref_qual
av_qa_mask = average_alt_quals < min_mean_alt_qual
av_qr_mask = average_ref_quals < min_mean_ref_qual
# replace count data for individuals failing quality thresholds
# alt allele count AO
call_data[i][0][qa_mask] = 0
call_data[i][0][av_qa_mask] = 0
# ref allele count RO
call_data[i][1][qr_mask] = 0
call_data[i][1][av_qr_mask] = 0
# reset coverage for gq failure
call_data[i][2][gq_mask] = 0
# reset genotypes for gq failure
call_data[i][3][gq_mask] = -2
# first item of the above list is alt counts, then ref counts and
# coverage.
#############################
# create a multiindex for the variant df that we'll create next
index = pd.MultiIndex.from_tuples(
split_variants, names=grouping_keys)
# get alt counts
variant_counts = pd.DataFrame(np.array(split_calls)[:, 0],
columns=variants["samples"],
index=index).replace(-1, 0)
# get reference counts
reference_counts = pd.DataFrame(np.array(split_calls)[:, 1],
columns=variants["samples"],
index=index).replace(-1, 0)
# get coverage depth
coverage = pd.DataFrame(np.array(split_calls)[:, 2],
columns=variants["samples"],
index=index).replace(-1, 0)
# combine counts for same changes
# sum alt counts
mutation_counts = variant_counts.groupby(grouping_keys).sum()
        # take the min of ref counts
mutation_refs = reference_counts.groupby(grouping_keys).min()
# take the max of coverage counts
mutation_coverage = coverage.groupby(grouping_keys).max()
# save count tables
mutation_counts.T.to_csv(os.path.join(wdir, output_prefix
+ "alternate_AN_table.csv"))
mutation_refs.T.to_csv(os.path.join(wdir, output_prefix
+ "reference_AN_table.csv"))
mutation_coverage.T.to_csv(os.path.join(wdir, output_prefix
+ "coverage_AN_table.csv"))
# get genotypes
gt_calls = pd.DataFrame((np.array(split_calls)[:, 3]),
columns=variants["samples"],
index=index)
def combine_gt(g):
if 1 in g.values:
return 1
elif 0 in g.values:
if 2 in g.values:
return 1
else:
return 0
elif 2 in g.values:
return 2
else:
return -1
gt_calls = gt_calls.groupby(grouping_keys).agg(combine_gt)
gt_calls.T.to_csv(os.path.join(wdir, output_prefix
+ "genotypes_AN_table.csv"))
if aggregate_none:
# if no aggregation will be done, load the vcf file
if annotate or annotated_vcf:
# if annotation was requested use the annotated vcf path
variants = allel.read_vcf(annotated_vcf_path, fields=["*"],
alt_number=1,
transformers=allel.ANNTransformer())
else:
# if the file is not annotated, don't try to parse ANN field.
variants = allel.read_vcf(annotated_vcf_path, fields=["*"],
alt_number=1)
# Freebayes vcfs have AO and RO counts for alt and ref allele depths
# but GATK has a combined AD depth. Create AO and RO from AD if
# needed
try:
variants["calldata/AO"]
except KeyError:
variants["calldata/RO"] = variants["calldata/AD"][:, :, 0]
variants["calldata/AO"] = variants["calldata/AD"][:, :, 1]
variant_fields = ["CHROM", "POS", "REF", "ALT", "QUAL"]
if annotate or annotated_vcf:
variant_fields.extend(["ANN_Gene_ID", "ANN_HGVS_p"])
variant_fields = ["variants/" + v for v in variant_fields]
# specify fields of interest from individual level data
# that is basically the count data for tables. AO: alt allele count,
# RO ref count, DP: coverage.
call_data_fields = ['calldata/AO', 'calldata/RO', 'calldata/DP',
'calldata/GT', 'calldata/GQ', 'calldata/QA',
'calldata/QR']
variants["calldata/GT"] = variants["calldata/GT"].sum(axis=2)
# zip variant level information together, so we have a single value
# for each variant
variant_data = list(zip(*[variants[v] for v in variant_fields]))
# get count data for the variants
call_data = list(zip(*[variants[c] for c in call_data_fields]))
split_variants = []
split_calls = []
for i in range(len(variant_data)):
vd = variant_data[i][:4]
site_qual = float(variant_data[i][4])
if site_qual < min_site_qual:
continue
if annotate or annotated_vcf:
g_ann = variant_data[i][5]
p_ann = variant_data[i][6]
if p_ann == "":
p_ann = "."
if g_ann == "":
g_ann = "."
else:
p_ann = "."
g_ann = "."
vd = vd + (g_ann, p_ann)
split_variants.append(vd)
split_calls.append(call_data[i])
# get individual level data
genotype_quals = call_data[i][4]
ao_count = call_data[i][0]
alt_quals = call_data[i][5]
average_alt_quals = alt_quals / ao_count
ro_count = call_data[i][1]
ref_quals = call_data[i][6]
average_ref_quals = ref_quals / ro_count
gq_mask = genotype_quals < min_genotype_qual
qa_mask = alt_quals < min_alt_qual
qr_mask = ref_quals < min_ref_qual
av_qa_mask = average_alt_quals < min_mean_alt_qual
av_qr_mask = average_ref_quals < min_mean_ref_qual
# replace count data for individuals failing quality thresholds
# alt allele count AO
call_data[i][0][qa_mask] = 0
call_data[i][0][av_qa_mask] = 0
# ref allele count RO
call_data[i][1][qr_mask] = 0
call_data[i][1][av_qr_mask] = 0
# reset coverage for gq failure
call_data[i][2][gq_mask] = 0
# reset genotypes for gq failure
call_data[i][3][gq_mask] = -2
# first item of the above list is alt counts, then ref counts and
# coverage.
#############################
# create a multiindex for the variant df that we'll create next
variant_fields = variant_fields[:4] + [
"variants/Gene ID", "variants/AA Change"]
index = pd.MultiIndex.from_tuples(split_variants,
names=[v.split("variants/")[1]
for v in variant_fields])
# get alt counts
variant_counts = pd.DataFrame(np.array(split_calls)[:, 0],
columns=variants["samples"],
index=index).replace(-1, 0)
# get reference counts
reference_counts = pd.DataFrame(np.array(split_calls)[:, 1],
columns=variants["samples"],
index=index).replace(-1, 0)
# get coverage depth
coverage = pd.DataFrame(np.array(split_calls)[:, 2],
columns=variants["samples"],
index=index).replace(-1, 0)
# save count tables
variant_counts.T.to_csv(os.path.join(wdir, output_prefix
+ "alternate_table.csv"))
reference_counts.T.to_csv(os.path.join(wdir, output_prefix
+ "reference_table.csv"))
coverage.T.to_csv(os.path.join(wdir, output_prefix
+ "coverage_table.csv"))
# get genotypes
gt_calls = pd.DataFrame((np.array(split_calls)[:, 3]),
columns=variants["samples"],
index=index).replace(-2, -1)
gt_calls.T.to_csv(os.path.join(wdir, output_prefix
+ "genotypes_table.csv"))
def vcf_to_tables(vcf_file, settings=None, settings_file=None, annotate=True,
geneid_to_genename=None, target_aa_annotation=None,
aggregate_aminoacids=False, target_nt_annotation=None,
aggregate_nucleotides=False, decompose_options=[],
annotated_vcf=False, aggregate_none=False, min_site_qual=-1,
min_target_site_qual=-1, min_genotype_qual=None,
output_prefix=""):
"""Create various tables from a vcf file.
Create various tables from a vcf file generated by the freebayes
program. There are 3 different types of count output for each variant:
variant count, reference count and coverage. The vcf file will be split
into biallelic variants. Table versions of the input vcf will be created
but the info fields will be limited to the mandatory vcf fields and some
    annotation data if available.
In addition to the original vcf table, aa change tables can be generated.
These will be generated by filtering the vcf to missense variants only,
decomposing block substitutions (haplotypes) and combining the counts for
the same aminoacid changes. This operation is specifically intended for
generating data for targeted missense mutations and only reports that. All
other variants, even those complex variants including targeted variants
will not be reported. Finally, one specific mutation (dhps-437) will have
reference counts instead of variant counts if present. This is because this
drug resistance variant is encoded by the 3d7 reference sequence.
Parameters
----------
settings: dict, None
Analysis settings dictionary. Either this or settings_file must
be provided.
settings_file: str/path, None
Path to the analysis settings file. Either this or the settings dict
must be provided.
annotate: bool, True
Annotate variant file. This is required for protein level analysis.
vcf_file: str/path
Starting vcf file.
    geneid_to_genename: str/path, None.
        Path to a tab separated text file that maps gene ids to gene names.
Column names must be gene_id and gene_name. Gene IDs
will populate the Gene field if this file is not provided.
target_aa_annotation: str/path, None.
Path to a tab separated text file with targeted variant information to
annotate and label targeted amino acid changes.
It must have gene_name, aminoacid_change, and mutation_name columns.
Amino acid changes should be represented as refAAPosAltAA. refAA and
AltAA must be three letter amino acid codes.
This file is required for targeted protein variant labeling.
target_nt_annotation: str/path, None.
Path to a tab separated text file with targeted variant information to
annotate and label targeted nucleotide changes.
It must have CHROM, POS, REF, ALT, NAME columns.
This file is required for targeted nucleotide variant labeling.
aggregate_aminoacids: bool, False
whether counts for same amino acids should be aggregated. This involves
decomposing multi amino acid changes for missense variants. If amino
acid based targets will be annotated, based on a provided annotation
dictionary, aggregation step must be completed. Targeted mutations
that are part of complex events (indels, stop loss/gain etc.) will not
be labeled as targeted.
aggregate_nucleotides: bool, False
whether the counts for nucleotide changes should be aggregated. This
involves decomposing all variants to the smallest units possible,
breaking all haplotype data. The level of decomposition should be
specified with the decompose_options parameter.
aggregate_none: bool, False.
Do no aggregation on counts, save the original (annotated if requested)
vcf file as 3 count tables. Three aggregation options are compatible
with each other and can be used all at once.
decompose_options: list, []
if aggregate nucleotides option is selected, these options will be
passed to vt program. "-a" for decomposing variants containing indels,
for example. "-p" for keeping phase information. Any option to vt
decompose_blocksub would be valid. By default indels will not be
decomposed.
annotated_vcf: bool, False
is the provided vcf file annotated using snpEff. These annotations
will be used if no count aggregation is to be done and annotate option
is False.
min_site_qual: float, -1
Filter variants with QUAL values less than this value if the site is
not a targeted site. If targeted, the site will be kept regardless of
the qual value for the site. freebayes manual indicates that
simulations showed a value between 1-30 would be good. So a minimum
value of 1 here would clean up most junk sites.
min_target_site_qual: float, -1
If a variant site is targeted but the site qual is lower than this,
reset the alternate observation counts to 0. It may be best to leave
this at the default value since there is usually additional evidence
        that a targeted variant exists in a sample compared to a de novo
variant.
"""
# get the analysis settings
# check if both settings and the settings file are None:
if (settings is None) and (settings_file is None):
print("settings or settings file must be provided for freebayes_call.")
return
else:
if settings is None:
settings = get_analysis_settings(settings_file)
else:
settings = copy.deepcopy(settings)
# get the working directory from settings
wdir = settings["workingDir"]
# All postprocessing steps require biallelic variant representation.
# so we'll use bcftools to split multiallelics to their own lines.
genome_fasta = get_file_locations()[settings["species"]]["fasta_genome"]
vcf_path = os.path.join(wdir, vcf_file)
# filter genotype for quality if specified
if min_genotype_qual is not None:
if vcf_file.endswith(".gz"):
vtype = "--gzvcf"
else:
vtype = "--vcf"
filt_res = subprocess.Popen(["vcftools", vtype, vcf_path,
"--minGQ", str(min_genotype_qual),
"--recode", "--recode-INFO-all",
"--stdout"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
filt_vcf_path = os.path.join(
wdir, output_prefix + "variants.GQ."
+ str(min_genotype_qual) + ".vcf.gz")
with open(filt_vcf_path, "wb") as outfile:
zip_res = subprocess.run(["bgzip", "-f"], stdin=filt_res.stdout,
stdout=outfile,
stderr=subprocess.PIPE)
index_res = subprocess.run(
["bcftools", "index", "-f", filt_vcf_path],
stderr=subprocess.PIPE)
if zip_res.returncode != 0:
print(("Compression of GQ filtered vcf failed due to "
"error: {}. \n Genotypes will not be "
"filtered.").format(zip_res.stderr))
elif index_res.returncode != 0:
print(("Indexing GQ filtered vcf file failed "
"due to error: {}. \n Genotypes will not "
"be filtered.").format(index_res.stderr))
else:
vcf_path = filt_vcf_path
split_vcf_path = os.path.join(wdir, output_prefix + "split." + vcf_file)
subprocess.run(["bcftools", "norm", "-f", genome_fasta, "-m-both",
vcf_path, "-Oz", "-o", split_vcf_path], check=True,
stderr=subprocess.PIPE)
subprocess.run(["bcftools", "index", "-f", split_vcf_path], check=True,
stderr=subprocess.PIPE)
# Will protein level aggregation be performed on the variants?
# This will only be done for simple missense variants but it is important
# to annotate the vcf file before breaking down the haplotypes.
if annotate:
annotated_vcf_path = os.path.join(wdir, output_prefix + "split.ann."
+ vcf_file)
res = annotate_vcf_file(settings, split_vcf_path, annotated_vcf_path)
if res != 0:
print("Annotating the vcf file failed.")
return
else:
annotated_vcf_path = split_vcf_path
if aggregate_aminoacids:
if not (annotate or annotated_vcf):
print("annotate option must be set to true or an annotadet vcf "
"file must be provided and annotated_vcf option must be "
"set to true for amino acid level aggregation. \n"
"Exiting!")
return
# check if a target annotation dict is provided.
target_annotation_dict = {}
if target_aa_annotation is not None:
taa = pd.read_table(target_aa_annotation).set_index(
["gene_name", "aminoacid_change"]).to_dict(orient="index")
for k in taa.keys():
target_annotation_dict[k] = taa[k]["mutation_name"]
# check if a gene id to gene name file is provided
gene_ids = {}
if geneid_to_genename is not None:
            gids = pd.read_table(geneid_to_genename)
import wradlib as wrl
import numpy as np
import xarray as xr
import pandas as pd
import tarfile
from tqdm import tqdm
def read_azi_tgz_files_to_xarray_dataset(fn_list,
elevation,
r=None,
az=None,
check_N_az=None,
radar_location=None):
""" Read azi tgz files and parse them into a xarray DataSet
Parameters
----------
fn_list : list
List of paths to .azi files
elevation : float
Radar beam elevation in degree
r : array
Radar beam ranges in meter
az : array
Radar beam azimuth in degree from north
check_N_az : int
Check number of az values for each file. Skip file if there is a
mismatch
radar_location : tuple of floats
Radar location of the form (latitude, longitude, altitude)
Returns
-------
xarray.DataSet with radar data and metadata
"""
    # Read the first azi file to get the metadata required for deriving
    # latitude, longitude and altitude
    if check_N_az is None:
        with tarfile.open(fn_list[0]) as tar:
            f = tar.extractfile(tar.getmembers()[0])
            r_file, az_file, location_file = _get_r_az_loc(f)
    # If check_N_az is given, iterate over azi files in first tgz file
    # until az is found with the correct length
    else:
        with tarfile.open(fn_list[0]) as tar:
            for file_in_tar in tar.getmembers():
                f = tar.extractfile(file_in_tar)
                r_file, az_file, location_file = _get_r_az_loc(f)
                if len(az_file) == check_N_az:
                    break
                else:
                    print('Trying to get metadata from %s' % f)
    # Use the values read from the file unless r, az or radar_location
    # were supplied as arguments
    if r is None:
        r = r_file
    if az is None:
        az = az_file
    if radar_location is None:
        radar_location = location_file
# Build 2D grids for r and az
#r_grid, az_grid = np.meshgrid(r, az)
xyz = wrl.georef.spherical_to_proj(r, az, elevation, radar_location)
lons = xyz[:, :, 0]
lats = xyz[:, :, 1]
data_list = []
metadata_list = []
for fn in fn_list:
with tarfile.open(fn) as tar:
for tarinfo in tqdm(tar, desc=('Reading ' + fn)):
f = tar.extractfile(tarinfo)
temp_data, temp_metadata = read_azi_file(f)
if check_N_az is not None:
if temp_data.shape[0] != check_N_az:
print(('N_az = %d instead of %d. --> Skipping %s' %
(temp_data.shape[0], check_N_az, fn)))
continue
if len(temp_metadata['az']) != check_N_az:
print('N_az = %d instead of %d. --> Skipping %s' % (
len(temp_metadata['az']), check_N_az, fn))
continue
data_list.append(temp_data)
metadata_list.append(temp_metadata)
    time_list = [pd.to_datetime(metadata['date'] + ' ' + metadata['time'])
                 for metadata in metadata_list]
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2020, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
#import os
import numpy as np
import pandas as pd
from unittest import TestCase
from exatomic import gaussian
from exatomic.base import resource
from exatomic.gaussian import Output, Fchk
class TestFchk(TestCase):
def setUp(self):
self.mam1 = Fchk(resource('g09-ch3nh2-631g.fchk'))
self.mam2 = Fchk(resource('g09-ch3nh2-augccpvdz.fchk'))
self.mam3 = Fchk(resource('g16-methyloxirane-def2tzvp-freq.fchk'))
self.mam4 = Fchk(resource('g16-h2o2-def2tzvp-freq.fchk'))
self.nitro_nmr = Fchk(resource('g16-nitromalonamide-6-31++g-nmr.fchk'))
def test_parse_atom(self):
self.mam1.parse_atom()
self.assertEqual(self.mam1.atom.shape[0], 7)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mam1.atom))))
self.mam2.parse_atom()
self.assertEqual(self.mam2.atom.shape[0], 7)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mam2.atom))))
def test_parse_basis_set(self):
self.mam1.parse_basis_set()
self.assertEqual(self.mam1.basis_set.shape[0], 32)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mam1.basis_set))))
self.mam2.parse_basis_set()
self.assertEqual(self.mam2.basis_set.shape[0], 53)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mam2.basis_set))))
def test_parse_orbital(self):
self.mam1.parse_orbital()
self.assertEqual(self.mam1.orbital.shape[0], 28)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mam1.orbital))))
self.mam2.parse_orbital()
self.assertEqual(self.mam2.orbital.shape[0], 91)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mam2.orbital))))
def test_parse_momatrix(self):
self.mam1.parse_momatrix()
self.assertEqual(self.mam1.momatrix.shape[0], 784)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mam1.momatrix))))
self.mam2.parse_momatrix()
self.assertEqual(self.mam2.momatrix.shape[0], 8281)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mam2.momatrix))))
def test_parse_basis_set_order(self):
self.mam1.parse_basis_set_order()
self.assertEqual(self.mam1.basis_set_order.shape[0], 28)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mam1.basis_set_order))))
self.mam2.parse_basis_set_order()
self.assertEqual(self.mam2.basis_set_order.shape[0], 91)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mam2.basis_set_order))))
def test_parse_frame(self):
self.mam1.parse_frame()
self.assertEqual(self.mam1.frame.shape[0], 1)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mam1.frame))))
self.mam2.parse_frame()
self.assertEqual(self.mam2.frame.shape[0], 1)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mam2.frame))))
def test_parse_frequency(self):
self.mam3.parse_frequency()
self.assertEqual(self.mam3.frequency.shape[0], 240)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mam3.frequency))))
self.mam4.parse_frequency()
self.assertEqual(self.mam4.frequency.shape[0], 24)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mam4.frequency))))
def test_parse_frequency_ext(self):
self.mam3.parse_frequency_ext()
self.assertEqual(self.mam3.frequency_ext.shape[0], 24)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mam3.frequency_ext))))
self.mam4.parse_frequency_ext()
self.assertEqual(self.mam4.frequency_ext.shape[0], 6)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mam4.frequency_ext))))
def test_parse_gradient(self):
self.mam3.parse_gradient()
self.assertEqual(self.mam3.gradient.shape[0], 10)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mam3.gradient))))
self.mam4.parse_gradient()
self.assertEqual(self.mam4.gradient.shape[0], 4)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mam4.gradient))))
def test_shielding_tensor(self):
self.nitro_nmr.parse_nmr_shielding()
self.assertEqual(self.nitro_nmr.nmr_shielding.shape[0], 15)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.nitro_nmr.nmr_shielding))))
def test_to_universe(self):
"""Test the to_universe method."""
mam1 = self.mam1.to_universe(ignore=True)
mam2 = self.mam2.to_universe(ignore=True)
for uni in [mam1, mam2]:
# cannot add frequency and frequency_ext attributes as they require
# very specific inputs
for attr in ['atom', 'basis_set', 'basis_set_order',
'momatrix', 'orbital', 'frame']:
self.assertTrue(hasattr(uni, attr))
class TestOutput(TestCase):
"""
This test ensures that the parsing functionality works on
a smattering of output files that were generated with the
Gaussian software package. Target syntax is for Gaussian
09.
"""
def setUp(self):
# TODO : add some cartesian basis set files
self.uo2 = Output(resource('g09-uo2.out'))
self.mam3 = Output(resource('g09-ch3nh2-631g.out'))
self.mam4 = Output(resource('g09-ch3nh2-augccpvdz.out'))
# need two because of the current limitations in the parse_frequency code
self.meth_opt = Output(resource('g16-methyloxirane-def2tzvp-opt.out'))
self.meth_freq = Output(resource('g16-methyloxirane-def2tzvp-freq.out'))
self.nap_tddft = Output(resource('g16-naproxen-def2tzvp-tddft.out'))
self.h2o2_tddft = Output(resource('g16-h2o2-def2tzvp-tddft.out'))
self.nap_opt = Output(resource('g16-naproxen-def2tzvp-opt.out'))
self.nitro_nmr = Output(resource('g16-nitromalonamide-6-31++g-nmr.out'))
# to test having both a geometry optimization and frequencies calculation
self.meth_opt_freq_hp = Output(resource('g16-methyloxirane-def2tzvp-opt-freq.out'))
def test_parse_atom(self):
self.uo2.parse_atom()
self.assertEqual(self.uo2.atom.shape[0], 3)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.uo2.atom))))
self.mam3.parse_atom()
self.assertEqual(self.mam3.atom.shape[0], 7)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mam3.atom))))
self.mam4.parse_atom()
self.assertEqual(self.mam4.atom.shape[0], 7)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mam4.atom))))
self.meth_opt.parse_atom()
self.assertEqual(self.meth_opt.atom.shape[0], 120)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.meth_opt.atom))))
self.nap_opt.parse_atom()
self.assertEqual(self.nap_opt.atom.shape[0], 806)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.nap_opt.atom))))
self.meth_opt_freq_hp.parse_atom()
self.assertEqual(self.meth_opt_freq_hp.atom.shape[0], 130)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.meth_opt_freq_hp.atom))))
def test_parse_basis_set(self):
self.uo2.parse_basis_set()
self.assertEqual(self.uo2.basis_set.shape[0], 49)
        self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.uo2.basis_set))))
import sqlite3
import time
import public_function as pb_fnc
import pandas as pd
import numpy as np
class InfoCheck:
bu_name = ""
db_path = "../data/_DB/"
def __init__(self, bu):
self.__class__.bu_name = bu
# get all master data of single code
def get_single_code_all_master_data(self, material_code, master_data_item_list):
str_master_data = ""
for item in master_data_item_list:
str_master_data += item + ","
str_master_data = str_master_data.rstrip(",")
database_fullname = self.__class__.db_path + self.__class__.bu_name + "_Master_Data.db"
datasheet_name = self.__class__.bu_name + "_Master_Data"
conn = sqlite3.connect(database_fullname)
c = conn.cursor()
sql_cmd = 'SELECT %s FROM %s WHERE Material=\"%s\"' % (str_master_data, datasheet_name, material_code)
c.execute(sql_cmd)
result = c.fetchall()
if result:
return result[0]
else:
return 0
    # read all master data for a single code
    def get_master_data(self, code):
        # full database path and file name
        db_fullname = self.__class__.db_path + "Master_Data.db"
        # table name, same as the file name
table_name = "MATERIAL_MASTER"
conn = sqlite3.connect(db_fullname)
c = conn.cursor()
str_cmd = "SELECT Description, Chinese_Description, Hierarchy_4, Hierarchy_5, Sales_Status, Purchase_Status, " \
"Standard_Cost FROM " + table_name + " WHERE Material = \'" + code + "\' "
c.execute(str_cmd)
row = c.fetchall()
list_title = ["Description", "Chinese_Description", "Hierarchy_4", "Hierarchy_5", "Sales_Status",
"Purchase_Status", "Standard_Cost"]
return [list_title, list(row[0])]
    # read the full master data list
    def get_master_data_list(self):
        # file name, without extension
        file_name = self.__class__.bu_name + "_Master_Data"
        # full database path and file name
        db_fullname = self.__class__.db_path + file_name + ".db"
        # table name, same as the file name
tbl_name = file_name
conn = sqlite3.connect(db_fullname)
c = conn.cursor()
result = c.execute("SELECT * from " + tbl_name)
row = result.fetchall()
conn.close()
return list(row)
# get master data for code list, phase out
# def get_master_data_for_list(self, code_list, master_data_name):
# file_name = self.__class__.bu_name + "_Master_Data" if master_data_name == "SAP_Price" else "Master_Data"
# db_fullname = self.__class__.db_path + file_name + ".db"
# table_name = self.__class__.bu_name + "_SAP_Price" if master_data_name == "SAP_Price" else "MATERIAL_MASTER"
# master_data_result = []
# conn = sqlite3.connect(db_fullname)
# c = conn.cursor()
# for code_name in code_list:
# if master_data_name == "SAP_Price":
# sql_cmd = "SELECT Price FROM " + table_name + " WHERE Material = \'" + code_name + "\'"
# else:
# sql_cmd = "SELECT " + master_data_name + " FROM " + table_name + " WHERE Material = \'" + code_name + "\'"
# c.execute(sql_cmd)
# master_data_output = c.fetchall()
# if master_data_output:
# master_data_result.append(master_data_output[0][0])
# else:
# master_data_result.append(0)
# return master_data_result
# get single column from bu master data
def get_bu_master_data(self, code, column_name):
file_name = self.__class__.bu_name + "_Master_Data"
db_fullname = self.__class__.db_path + file_name + ".db"
conn = sqlite3.connect(db_fullname)
c = conn.cursor()
sql_cmd = 'SELECT %s FROM %s WHERE Material = \"%s\"' % (column_name, file_name, code)
c.execute(sql_cmd)
md_result = c.fetchall()
if md_result:
return md_result[0][0]
else:
return ""
    # sales data by Hierarchy 5
    def get_h5_sales_data(self, data_type, price_type, hierarchy, month_number):
        # file name, without extension
        tbl_name = self.__class__.bu_name + "_" + data_type
        # full database path and file name
        db_fullname = self.__class__.db_path + tbl_name + ".db"
        conn = sqlite3.connect(db_fullname)
        c = conn.cursor()
        # build the SQL command
if price_type == "Standard_Cost":
if hierarchy == "ALL":
str_cmd = "SELECT month, sum(Value_Standard_Cost) from " + tbl_name + " GROUP BY month ORDER BY month"
else:
str_cmd = "SELECT month, sum(Value_Standard_Cost) from " + tbl_name + " WHERE Hierarchy_5 = '" + \
hierarchy + "\' COLLATE NOCASE GROUP BY month ORDER BY month"
else:
if hierarchy == "ALL":
str_cmd = "SELECT month, sum(Value_SAP_Price) from " + tbl_name + " GROUP BY month ORDER BY month"
else:
str_cmd = "SELECT month, sum(Value_SAP_Price) from " + tbl_name + " WHERE Hierarchy_5 = \'" + \
hierarchy + "\' COLLATE NOCASE GROUP BY month ORDER BY month"
c.execute(str_cmd)
sales_result = self.data_mapping(c.fetchall(), pb_fnc.get_current_month(), 0 - month_number)
return sales_result
def get_h5_inventory_data(self, inv_type, price_type, h5_name, month_number):
        # file name, without extension
        file_name = self.__class__.bu_name + "_" + inv_type + "_INV"
        # full database path and file name
        db_fullname = self.__class__.db_path + file_name + ".db"
        # table name, same as the file name
tbl_name = file_name
conn = sqlite3.connect(db_fullname)
c = conn.cursor()
if h5_name == "ALL":
str_cmd = "SELECT month, SUM(Value_%s) from %s GROUP BY month" % (price_type, tbl_name)
else:
str_cmd = "SELECT month, SUM(Value_%s) from %s WHERE Hierarchy_5 = \"%s\" COLLATE NOCASE " \
"GROUP BY month " % (price_type, tbl_name, h5_name)
c.execute(str_cmd)
h5_inv_result = self.data_mapping(c.fetchall(), pb_fnc.get_current_month(), 0 - month_number)
return h5_inv_result
# get sap_price by code
def get_code_sap_price(self, code_name):
db_fullname = self.__class__.db_path + self.__class__.bu_name + "_Master_Data.db"
table_name = self.__class__.bu_name + "_SAP_Price"
conn = sqlite3.connect(db_fullname)
c = conn.cursor()
c.execute("SELECT Price FROM " + table_name + " WHERE Material = \'" + code_name + "\'")
sap_price_result = c.fetchall()
if not sap_price_result:
return 0
else:
return sap_price_result[0][0]
# get gtin by code
def get_code_gtin(self, code_name):
db_fullname = self.__class__.db_path + "Master_Data.db"
filename = "GTIN"
conn = sqlite3.connect(db_fullname)
c = conn.cursor()
c.execute("SELECT Barcode from " + filename + " WHERE [Material code] = \'" + code_name + "\'")
return c.fetchall()[0][0]
# get RAG by code
def get_code_rag(self, code_name):
db_fullname = self.__class__.db_path + "Master_Data.db"
filename = "RAG_Report"
conn = sqlite3.connect(db_fullname)
c = conn.cursor()
c.execute("SELECT REGLICNO, REGAPDATE, REGEXDATE from " + filename + " WHERE MATNR = \'" + code_name +
"\' ORDER by REGAPDATE")
return c.fetchall()
# get Phoenix Project Status by code
def get_code_phoenix_result(self, material_code):
db_fullname = self.__class__.db_path + self.__class__.bu_name + "_Master_Data.db"
filename = "TU_Phoenix_List"
conn = sqlite3.connect(db_fullname)
c = conn.cursor()
sql_cmd = "SELECT Month, [Target SKU] FROM " + filename + " WHERE [Exit SKU] = \'" + material_code + "\'"
c.execute(sql_cmd)
phoenix_result = c.fetchall()
phoenix_title = ["Phoenix Status", "Stop Manufacturing Date", "Target SKU"]
if len(phoenix_result) == 0:
return [["Phoenix Status"], ["N"]]
else:
return [phoenix_title, ["Y"] + list(phoenix_result[0])]
    # Sales data by material code
def get_code_sales(self, data_type, code, month_number):
        # File name, without extension
tbl_name = self.__class__.bu_name + "_" + data_type
        # Full database path plus file name
db_fullname = self.__class__.db_path + tbl_name + ".db"
conn = sqlite3.connect(db_fullname)
c = conn.cursor()
str_cmd = "SELECT month, SUM(quantity) from " + tbl_name + " WHERE material = \'" + code \
+ "\' GROUP BY month ORDER BY month"
c.execute(str_cmd)
sales_result = self.data_mapping(c.fetchall(), pb_fnc.get_current_month(), 0 - month_number)
conn.close()
return sales_result
# get inventory data by code
# inventory_type: "JNJ", "LP", month_number: positive integer
def get_code_inventory(self, material_code, inventory_type, month_number):
tbl_name = self.__class__.bu_name + "_" + inventory_type + "_INV"
db_fullname = self.__class__.db_path + tbl_name + ".db"
conn = sqlite3.connect(db_fullname)
c = conn.cursor()
stock_column_name = 'Available_Stock' if inventory_type == 'JNJ' else 'quantity'
sql_cmd = 'SELECT month, SUM(%s) FROM %s WHERE Material = \"%s\" GROUP BY month ' \
'ORDER BY month' % (stock_column_name, tbl_name, material_code)
c.execute(sql_cmd)
inventory_result = self.data_mapping(c.fetchall(), pb_fnc.get_current_month(), 0 - month_number)
return inventory_result
# calculate inventory month
@staticmethod
def get_inventory_month(lst_inv, lst_sales, month_number, blank_type=0):
# set blank value as None if blank_type is 1, else set zero
lst_inv_month = []
# leave previous 6 month in blank
for i in range(0, 6):
            lst_inv_month.append(0 if blank_type == 0 else None)
for i in range(0, month_number-6):
if sum(lst_sales[i: i+6]) == 0:
                lst_inv_month.append(0 if blank_type == 0 else None)
else:
lst_inv_month.append(round(lst_inv[i+6] / (sum(lst_sales[i: i+6])/6), 1))
return lst_inv_month
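    # Example (hypothetical numbers): with month_number=8 and blank_type=0,
    # lst_inv = [0, 0, 0, 0, 0, 0, 3, 7] and lst_sales = [1, 1, 1, 1, 1, 1, 2, 2]
    # yield [0, 0, 0, 0, 0, 0, 3.0, 6.0], i.e. inventory divided by the trailing
    # six-month average of sales, with the first six positions left blank.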
# Generate month list.
# The previous month list does not include current month.
# The future month list include current month.
@staticmethod
def get_time_list(start_point, parameter):
# Get month list in format YYYY-MM (start_point)
# parameter, the month list we need to generate
start_year, start_month = int(start_point[0:4]), int(start_point[-2:])
month_list = []
lower_limit = parameter if parameter <= 0 else 0
upper_limit = parameter if parameter > 0 else 0
for i in range(lower_limit, upper_limit):
t = (start_year, start_month + i, 14, 3, 6, 3, 6, 0, 0)
month_list.append(time.strftime("%Y-%m", time.localtime(time.mktime(t))))
return month_list
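    # Example: get_time_list("2020-06", -3) -> ['2020-03', '2020-04', '2020-05']
    # (previous months, current month excluded), while get_time_list("2020-06", 3)
    # -> ['2020-06', '2020-07', '2020-08'] (future months, current month included).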
    # Map the data onto the specified month list
def data_mapping(self, data, start_month, months):
month_list = self.get_time_list(start_month, months)
result_value = []
for item_month in month_list:
value = 0
for item_value in data:
if item_value[0] == item_month:
value = item_value[1]
result_value.append(value)
return result_value
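    # Example: data_mapping([('2020-04', 7), ('2020-05', 9)], '2020-06', -3)
    # returns [0, 7, 9]; months without a record in the data are filled with 0.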
# get forecast of single code, set fcst_type as Statistical or Final
def get_code_forecast(self, code_name, fcst_type, month_quantity):
db_fullname = self.__class__.db_path + self.__class__.bu_name + "_" + fcst_type + "_Forecast.db"
# get month list and generate blank dataframe
current_month = time.strftime("%Y-%m", time.localtime())
month_list = self.get_time_list(current_month, month_quantity)
df_forecast_final = pd.DataFrame(index=month_list)
# connect to forecast database
conn = sqlite3.connect(db_fullname)
c = conn.cursor()
# get the newest table
c.execute("SELECT name from sqlite_master where type = \"table\" ORDER by name DESC")
tbl_name = c.fetchone()[0]
# get pivot table of forecast
sql_cmd = 'SELECT Month, Quantity FROM %s WHERE Material =\"%s\"' % (tbl_name, code_name)
df_forecast = pd.read_sql(con=conn, sql=sql_cmd, index_col='Month')
# join the two dataframe to mapping
df_forecast_final = df_forecast_final.join(df_forecast)
df_forecast_final.fillna(0, inplace=True)
output = [item[0] for item in df_forecast_final.values.tolist()]
return [month_list, output]
# get forecast of one hierarchy with pandas, set forecast_type as Statistical or Final
def get_h5_forecast(self, h5_name, forecast_type, month_quantity):
# Get future month list
current_month = time.strftime("%Y-%m", time.localtime())
month_list = self.get_time_list(current_month, month_quantity)
df_forecast_result = pd.DataFrame(index=month_list, data=None)
# get forecast data
db_fullname = self.__class__.db_path + self.__class__.bu_name + "_" + forecast_type + "_Forecast.db"
conn = sqlite3.connect(db_fullname)
sql_cmd = 'SELECT name from sqlite_master where type = \"table\" ORDER by name DESC LIMIT 1'
df_table_list = pd.read_sql(sql=sql_cmd, con=conn)
table_name = df_table_list.values.tolist().pop().pop()
# get newest table
if h5_name.upper() == "ALL":
sql_cmd = 'SELECT Month, sum(Value_SAP_Price) FROM %s GROUP by Month Order by Month' % (table_name, )
else:
sql_cmd = 'SELECT Month, sum(Value_SAP_Price) FROM %s WHERE Hierarchy_5 = \"%s\" ' \
'GROUP by Month Order by Month' % (table_name, h5_name)
        df_forecast = pd.read_sql(sql=sql_cmd, con=conn, index_col='Month')
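        # Assumed continuation (mirrors get_code_forecast above): join the pivoted
        # forecast onto the future month list and return both lists.
        df_forecast_result = df_forecast_result.join(df_forecast)
        df_forecast_result.fillna(0, inplace=True)
        output = [item[0] for item in df_forecast_result.values.tolist()]
        return [month_list, output]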
import codecademylib3_seaborn
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# Import the CSV files and create the DataFrames:
user_data = pd.read_csv("user_data.csv")
pop_data = pd.read_csv("pop_data.csv")
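# Assumed next step of the exercise: a quick look at both frames before they are used.
print(user_data.head(15))
print(pop_data.head(15))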
"""Utilities for working with protein embeddings."""
import torch
from transformers.tokenization_utils import PreTrainedTokenizer
import torch.nn as nn
import numpy as np
import pandas as pd
import os
from sklearn.manifold import TSNE
from scipy.cluster.hierarchy import dendrogram, linkage
import matplotlib.pyplot as plt
import seaborn as sns
from loguru import logger
from tape.tokenizers import IUPAC_VOCAB
class EmbeddingToolkit(object):
"""Retrieve, preprocess and visualize embeddings
Args:
object ([type]): [description]
language_model (transformers.modeling_roberta.RobertaModel): pre-trained language model
"""
def __init__(self, model: nn.Module, tokenizer: PreTrainedTokenizer):
"""Initialize EmbeddingTools
Args:
model (nn.Module): pretrained model
tokenizer (PreTrainedTokenizer): a tokenizer
"""
self.model = model
self.tokenizer = tokenizer
def get_embedding(self, manually_selected_vocab: list, export_to_csv: bool, export_filepath: str, return_full_vocab: bool) -> pd.core.frame.DataFrame:
"""Get embeddings for a corresponding list of tokenizer vocabulary items as a Pandas dataframe object"
Args:
model (nn.Module): [description]
manually_selected_vocab (list): desired amino acid vocabulary, corresponds to keys in a dict returned from calling tokenizer.get_vocab()
return_full_vocab (bool): whether to return an embedding for all tokens, or just the ones from manually_selected_vocab
Returns:
            pandas.core.frame.DataFrame: embedding vectors indexed by vocabulary token
Example:
manually_selected_vocab = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']
# Create model and tokenizer objects
tokenizer = RobertaTokenizer.from_pretrained(dir_tokenizer)
model = RobertaModel.from_pretrained(pretrained_model_name_or_path=dir_model)
Toolkit = EmbeddingToolkit(model=model, tokenizer=tokenizer)
df = Toolkit.get_embedding(manually_selected_vocab=manually_selected_vocab)
logger.info(df)
0 1 2 3 ... 766 767
A -0.012096 -0.042020 -0.027793 -0.006360 ... 0.001138 -0.000997
B 0.018540 0.060982 0.055752 0.012910 ... 0.049360 0.013828
C -0.003167 0.001412 -0.026587 -0.040021 ... -0.033149 -0.006456
. . . . ... . .
. . . . ... . .
Y 0.026067 0.002919 -0.032527 0.025508 -0.018694 0.037993
Z -0.002928 0.016255 0.033822 -0.028604 0.000767 -0.035366
"""
# logger.info('Retrieving embeddings for ', manually_selected_vocab, '\n')
embedding = self.model.get_input_embeddings()
tokens = self.tokenizer.get_vocab() # could be moved outside to speed up, if the dict is huge
if return_full_vocab == True:
tokens_aa = tokens
            logger.info('Returning embeddings for all {} vocabulary tokens', len(tokens))
else:
tokens_aa = {k: tokens[k] for k in manually_selected_vocab} # dict {'A': 37, 'B': 38, ..., 'Y': 61, 'Z': 62}
embedded_tokens_df = pd.DataFrame(data = [embedding.weight[token].tolist() for token in tokens_aa.values()], index=tokens_aa.keys())
# logger.info('Head and Tail of embeddings dataframe: ')
# logger.info(embedded_tokens_df.head(), '\n')
logger.debug(embedded_tokens_df.tail())
if export_to_csv == True:
embedded_tokens_df.to_csv(path_or_buf=export_filepath)
            logger.info('Exported embeddings to: {}', export_filepath)
return embedded_tokens_df
def get_tsne(self, embedding_df: pd.core.frame.DataFrame, tsne_dim: int, export_to_csv: bool, export_filepath: str) -> pd.core.frame.DataFrame:
"""Compresses high dimensional word embedding into `tsne_dim` embedding, and return a pandas df. Used for visualization only.
recommended to use another dimensionality reduction method (e.g. PCA for dense data or TruncatedSVD for sparse data)
to reduce the number of dimensions to a reasonable amount (e.g. 50) if the number of embedding dimensions is very high.
Args:
embedding_df (pd.core.frame.DataFrame): Dataframe with embeddings of size [number_amino_acids x embedding_dimension]
tsne_dim (int): see more at https://scikit-learn.org/stable/modules/generated/sklearn.manifold.TSNE.html
Returns:
pd.core.frame.DataFrame: a dataframe with t-sne of embeddings, dimensions [number_amino_acids x tsne_dim]
0 1
A 13.062521 9.171124
C 36.266224 -11.948713
D -36.986889 14.661242
. . .
. . .
Y -26.306509 -23.310379
Z -3.202672 35.785797
"""
tsne_result = TSNE(
n_components=tsne_dim,
perplexity=5,
min_grad_norm=1E-7,
n_iter=250,
learning_rate=40,
init='pca',
verbose=1
).fit_transform(embedding_df)
        df = pd.DataFrame(data=tsne_result, index=embedding_df.index)
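        # Assumed continuation (mirrors get_embedding): optionally export, then return.
        if export_to_csv == True:
            df.to_csv(path_or_buf=export_filepath)
            logger.info('Exported t-SNE coordinates to: {}', export_filepath)
        return df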
# tests.test_model_selection.test_validation_curve
# Tests for the ValidationCurve visualizer
#
# Author: <NAME>
# Created: Sat Mar 31 06:25:05 2018 -0400
#
# Copyright (C) 2018 The scikit-yb developers
# For license information, see LICENSE.txt
#
# ID: test_validation_curve.py [c5355ee] <EMAIL> $
"""
Tests for the ValidationCurve visualizer
"""
##########################################################################
# Imports
##########################################################################
import sys
import pytest
import numpy as np
from unittest.mock import patch
from tests.base import VisualTestCase
from sklearn.svm import SVC
from sklearn.naive_bayes import BernoulliNB
from sklearn.tree import DecisionTreeRegressor
from sklearn.preprocessing import OneHotEncoder
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import ShuffleSplit, StratifiedKFold
from yellowbrick.datasets import load_mushroom
from yellowbrick.exceptions import YellowbrickValueError
from yellowbrick.model_selection.validation_curve import *
try:
import pandas as pd
except ImportError:
pd = None
##########################################################################
# Test Cases
##########################################################################
@pytest.mark.usefixtures("classification", "regression", "clusters")
class TestValidationCurve(VisualTestCase):
"""
Test the ValidationCurve visualizer
"""
@patch.object(ValidationCurve, "draw")
def test_fit(self, mock_draw):
"""
Assert that fit returns self and creates expected properties
"""
X, y = self.classification
params = (
"train_scores_",
"train_scores_mean_",
"train_scores_std_",
"test_scores_",
"test_scores_mean_",
"test_scores_std_",
)
oz = ValidationCurve(
SVC(), param_name="gamma", param_range=np.logspace(-6, -1, 5)
)
for param in params:
assert not hasattr(oz, param)
assert oz.fit(X, y) is oz
mock_draw.assert_called_once()
for param in params:
assert hasattr(oz, param)
@pytest.mark.xfail(sys.platform == "win32", reason="images not close on windows")
def test_classifier(self):
"""
Test image closeness on a classification dataset with kNN
"""
X, y = self.classification
cv = ShuffleSplit(3, random_state=288)
param_range = np.arange(3, 10)
oz = ValidationCurve(
KNeighborsClassifier(),
param_name="n_neighbors",
param_range=param_range,
cv=cv,
scoring="f1_weighted",
)
oz.fit(X, y)
oz.finalize()
self.assert_images_similar(oz)
def test_regression(self):
"""
Test image closeness on a regression dataset with a DecisionTree
"""
X, y = self.regression
cv = ShuffleSplit(3, random_state=938)
param_range = np.arange(3, 10)
oz = ValidationCurve(
DecisionTreeRegressor(random_state=23),
param_name="max_depth",
param_range=param_range,
cv=cv,
scoring="r2",
)
oz.fit(X, y)
oz.finalize()
self.assert_images_similar(oz, tol=12.0)
@pytest.mark.xfail(sys.platform == "win32", reason="images not close on windows")
def test_quick_method(self):
"""
Test validation curve quick method with image closeness on SVC
"""
X, y = self.classification
pr = np.logspace(-6, -1, 3)
cv = ShuffleSplit(n_splits=5, test_size=0.2, random_state=321)
viz = validation_curve(
SVC(), X, y, logx=True, param_name="gamma",
param_range=pr, cv=cv, show=False
)
self.assert_images_similar(viz)
@pytest.mark.xfail(sys.platform == "win32", reason="images not close on windows")
@pytest.mark.skipif(pd is None, reason="test requires pandas")
def test_pandas_integration(self):
"""
Test on mushroom dataset with pandas DataFrame and Series and NB
"""
data = load_mushroom(return_dataset=True)
X, y = data.to_pandas()
        X = pd.get_dummies(X)
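        # Assumed continuation of the test; the estimator and parameter grid below
        # are a guess that follows the pattern of the image-similarity tests above.
        cv = StratifiedKFold(n_splits=4, shuffle=True, random_state=32)
        param_range = np.arange(0.1, 3.0, 0.8)
        oz = ValidationCurve(
            BernoulliNB(), param_name="alpha", param_range=param_range,
            cv=cv, scoring="accuracy"
        )
        oz.fit(X, y)
        oz.finalize()
        self.assert_images_similar(oz)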
# Copyright 2019 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyspark.sql import SparkSession
import argparse
from time import time
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import pandas as pd
def quiet_logs(sc, log_level="ERROR"):
"""
Set the level of log in Spark.
Parameters
----------
sc : SparkContext
The SparkContext for the session
log_level : String [optional]
Level of log wanted: INFO, WARN, ERROR, OFF, etc.
"""
## Get the logger
logger = sc._jvm.org.apache.log4j
## Set the level
level = getattr(logger.Level, log_level, "INFO")
logger.LogManager.getLogger("org"). setLevel(level)
logger.LogManager.getLogger("akka").setLevel(level)
def addargs(parser):
""" Parse command line arguments for benchmark_io """
## Arguments
parser.add_argument(
'-inputpath', dest='inputpath',
required=True,
help='Path to a FITS file or a directory containing FITS files')
parser.add_argument(
'-nloops', dest='nloops',
required=True, type=int,
help='Number of times to run the benchmark.')
parser.add_argument(
'-log_level', dest='log_level',
default="ERROR",
help='Level of log for Spark. Default is ERROR.')
if __name__ == "__main__":
"""
Benchmarking Apache Spark FITS connector.
"""
parser = argparse.ArgumentParser(
description="""
Benchmarking Apache Spark FITS connector.""")
addargs(parser)
args = parser.parse_args(None)
spark = SparkSession\
.builder\
.getOrCreate()
## Set logs to be quiet
quiet_logs(spark.sparkContext, log_level=args.log_level)
## FITS
df_fits = spark.read.format("fits")\
.option("hdu", 1)\
.load(args.inputpath)
    ## Warm-up runs so that caching effects do not bias the timings
for loop in range(10):
df_fits.count()
## Record times
outputs = {"spark-fits": []}
for loop in range(args.nloops):
t0 = time()
df_fits.count()
outputs["spark-fits"].append(time() - t0)
    pdf = pd.DataFrame(outputs)
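    ## Assumed continuation: persist the timings and a simple histogram
    ## (the output file names are illustrative).
    pdf.to_csv("benchmark_io_times.csv", index=False)
    pdf.plot.hist(bins=20)
    plt.xlabel("Time to count all rows (second)")
    plt.savefig("benchmark_io_times.png")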
#!/usr/bin/env python3
#
# Copyright (c) 2015 - 2022, Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
'''
Prints a summary of the data from a power sweep experiment.
'''
import sys
import pandas
import argparse
import geopmpy.io
from experiment import common_args
def summary(parse_output):
# rename some columns
parse_output['power_limit'] = parse_output['POWER_PACKAGE_LIMIT_TOTAL']
parse_output['runtime'] = parse_output['runtime (s)']
parse_output['network_time'] = parse_output['time-hint-network (s)']
parse_output['energy_pkg'] = parse_output['package-energy (J)']
parse_output['energy_dram'] = parse_output['dram-energy (J)']
parse_output['frequency'] = parse_output['frequency (Hz)']
parse_output['achieved_power'] = parse_output['energy_pkg'] / parse_output['sync-runtime (s)']
parse_output['iteration'] = parse_output.apply(lambda row: row['Profile'].split('_')[-1],
axis=1)
# add extra columns
parse_output['cpu_time'] = parse_output['runtime'] - parse_output['network_time']
# set up index for grouping
parse_output = parse_output.set_index(['Agent', 'host', 'power_limit'])
    summary = pandas.DataFrame()
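    # Assumed continuation: average the per-trial metrics over agents and hosts
    # for each power limit (the exact column list is a guess based on the fields above).
    for col in ['runtime', 'cpu_time', 'network_time', 'energy_pkg',
                'energy_dram', 'frequency', 'achieved_power']:
        summary[col] = parse_output[col].groupby(level='power_limit').mean()
    return summary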
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from typing import Tuple
def clean_df_headers(df: pd.DataFrame) -> pd.DataFrame:
"""Remove leading and trailing spaces in DataFrame headers."""
headers = pd.Series(df.columns)
new_headers = [header.strip() for header in headers]
new_headers = pd.Series(new_headers)
df.columns = new_headers
return df
def configure_ax(ax: plt.axes,
df: pd.DataFrame = None,
xlabel: str = None,
                 ylabel: str = None,
                 ylim: Tuple[int, int] = None,
title: str = None,
legend: bool = False
) -> plt.axes:
"""Configure Matplotlib axe."""
if df is not None:
x = df.index
for h in df.columns:
y = df[h]
ax.plot(x, y,label=h)
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
if ylim is not None:
ax.set_ylim(ylim)
if title is not None:
ax.set_title(title)
    if legend:
ax.legend()
return ax
if __name__ == "__main__":
# Load sensor data
    df_data = pd.read_csv("step_03_-_scenario_08_-_after_tuning.txt")
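    # Assumed continuation: tidy the headers and plot every sensor column
    # with the helpers defined above (axis labels and title are placeholders).
    df_data = clean_df_headers(df_data)
    fig, ax = plt.subplots()
    configure_ax(ax, df=df_data, xlabel="Sample", ylabel="Value",
                 title="Sensor data after tuning", legend=True)
    plt.show()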
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/11_interface.ipynb (unless otherwise specified).
__all__ = ['tqdm_wrapper', 'check_version_and_hardware', 'wrapped_partial', 'create_database', 'import_raw_data',
'feature_finding', 'search_data', 'recalibrate_data', 'score', 'isobaric_labeling', 'protein_grouping',
'align', 'match', 'read_label_intensity', 'quantification', 'export', 'run_complete_workflow',
'extract_median_unique', 'get_file_summary', 'get_summary', 'parallel_execute', 'bcolors', 'is_port_in_use',
'run_cli', 'cli_overview', 'cli_database', 'cli_import', 'cli_feature_finding', 'cli_search',
'cli_recalibrate', 'cli_score', 'cli_align', 'cli_match', 'cli_quantify', 'cli_export', 'cli_workflow',
'cli_gui', 'CONTEXT_SETTINGS', 'CLICK_SETTINGS_OPTION']
# Cell
import alphapept.utils
from .utils import set_logger
import alphapept.performance
import logging
import sys
import numpy as np
import psutil
import tqdm
def tqdm_wrapper(pbar, update: float) -> None:
"""Update a qdm progress bar.
Args:
pbar (type): a tqd,.tqdm objet.
update (float): The new value for the progressbar.
"""
current_value = pbar.n
delta = update - current_value
pbar.update(delta)
# Cell
def check_version_and_hardware(settings: dict) -> dict:
"""Show platform and python information and parse settings.
Args:
        settings (dict): A dictionary with settings for how to process the data.
Returns:
dict: The parsed settings.
"""
import alphapept.utils
#alphapept.utils.check_hardware()
#alphapept.utils.check_python_env()
alphapept.utils.show_platform_info()
alphapept.utils.show_python_info()
settings = alphapept.utils.check_settings(settings)
return settings
# Cell
def wrapped_partial(func: callable, *args, **kwargs) -> callable:
"""Wrap a function with partial args and kwargs.
Args:
func (callable): The function to be wrapped.
*args (type): Args to be wrapped.
**kwargs (type): Kwargs to be wrapped.
Returns:
callable: The wrapped function.
"""
partial_func = functools.partial(func, *args, **kwargs)
functools.update_wrapper(partial_func, func)
return partial_func
# Cell
import os
import functools
import copy
def create_database(
settings: dict,
logger_set: bool = False,
settings_parsed: bool = False,
callback: callable = None
) -> dict:
"""Create the search database.
Args:
        settings (dict): A dictionary with settings for how to process the data.
logger_set (bool): If False, reset the default logger. Defaults to False.
settings_parsed (bool): If True, reparse the settings. Defaults to False.
callback (callable): A function that accepts a float between 0 and 1 as progress. Defaults to None.
Returns:
dict: the parsed settings.
Raises:
FileNotFoundError: If the FASTA file is not found.
"""
import alphapept.fasta
if not logger_set:
set_logger()
if not settings_parsed:
settings = check_version_and_hardware(settings)
if 'database_path' not in settings['experiment']:
database_path = ''
else:
database_path = settings['experiment']['database_path']
if database_path is None:
database_path = ''
if not settings['fasta']['save_db']: #Do not save DB
settings['experiment']['database_path'] = None
logging.info('Not saving Database.')
return settings
temp_settings = settings
if os.path.isfile(database_path):
logging.info(
'Database path set and exists. Using {} as database.'.format(
database_path
)
)
else:
logging.info(
'Database path {} is not a file.'.format(database_path)
)
if len(settings['experiment']['fasta_paths']) == 0:
raise FileNotFoundError("No FASTA files set.")
total_fasta_size = 0
for fasta_file in settings['experiment']['fasta_paths']:
if os.path.isfile(fasta_file):
fasta_size = os.stat(fasta_file).st_size/(1024**2)
total_fasta_size += fasta_size
logging.info(
'Found FASTA file {} with size {:.2f} Mb.'.format(
fasta_file,
fasta_size
)
)
else:
raise FileNotFoundError(
'File {} not found'.format(fasta_file)
)
fasta_size_max = settings['fasta']['fasta_size_max']
if total_fasta_size >= fasta_size_max:
logging.info(f'Total FASTA size {total_fasta_size:.2f} is larger than the set maximum size of {fasta_size_max:.2f} Mb')
settings['experiment']['database_path'] = None
return settings
logging.info('Creating a new database from FASTA.')
if not callback:
cb = functools.partial(tqdm_wrapper, tqdm.tqdm(total=1))
else:
cb = callback
(
spectra,
pept_dict,
fasta_dict
) = alphapept.fasta.generate_database_parallel(
temp_settings,
callback=cb
)
logging.info(
'Digested {:,} proteins and generated {:,} spectra'.format(
len(fasta_dict),
len(spectra)
)
)
alphapept.fasta.save_database(
spectra,
pept_dict,
fasta_dict,
database_path = database_path,
**settings['fasta']
)
logging.info(
'Database saved to {}. Filesize of database is {:.2f} GB'.format(
database_path,
os.stat(database_path).st_size/(1024**3)
)
)
settings['experiment']['database_path'] = database_path
return settings
# Cell
def import_raw_data(
settings: dict,
logger_set: bool = False,
settings_parsed: bool = False,
callback: callable = None
) -> dict:
"""Import raw data.
Args:
settings (dict): A dictionary with settings how to process the data.
logger_set (bool): If False, reset the default logger. Defaults to False.
settings_parsed (bool): If True, reparse the settings. Defaults to False.
callback (callable): A function that accepts a float between 0 and 1 as progress. Defaults to None.
Returns:
dict: the parsed settings.
"""
if not logger_set:
set_logger()
if not settings_parsed:
settings = check_version_and_hardware(settings)
if not callback:
cb = functools.partial(tqdm_wrapper, tqdm.tqdm(total=1))
else:
cb = callback
import alphapept.io
settings = parallel_execute(settings, alphapept.io.raw_conversion, callback = cb)
return settings
# Cell
def feature_finding(
settings: dict,
logger_set: bool = False,
settings_parsed: bool = False,
callback: callable = None
) -> dict:
"""Find features.
Args:
        settings (dict): A dictionary with settings for how to process the data.
logger_set (bool): If False, reset the default logger. Defaults to False.
settings_parsed (bool): If True, reparse the settings. Defaults to False.
callback (callable): A function that accepts a float between 0 and 1 as progress. Defaults to None.
Returns:
dict: the parsed settings.
"""
if not logger_set:
set_logger()
if not settings_parsed:
settings = check_version_and_hardware(settings)
import alphapept.feature_finding
if not callback:
cb = functools.partial(tqdm_wrapper, tqdm.tqdm(total=1))
else:
cb = callback
settings = parallel_execute(settings, alphapept.feature_finding.find_features, callback = cb)
return settings
# Cell
def search_data(
settings: dict,
first_search: bool = True,
logger_set: bool = False,
settings_parsed: bool = False,
callback: callable = None
) -> dict:
"""Create the search database.
Args:
        settings (dict): A dictionary with settings for how to process the data.
first_search (bool): If True, save the intermediary results as `first search`.
            Otherwise, calibrated mz_values are used and results are saved as `second search`.
Defaults to True.
logger_set (bool): If False, reset the default logger. Defaults to False.
settings_parsed (bool): If True, reparse the settings. Defaults to False.
callback (callable): A function that accepts a float between 0 and 1 as progress. Defaults to None.
Returns:
dict: the parsed settings.
Raises:
FileNotFoundError: If the FASTA file is not found.
"""
if not logger_set:
set_logger()
if not settings_parsed:
settings = check_version_and_hardware(settings)
import alphapept.search
import alphapept.io
if not callback:
cb = functools.partial(tqdm_wrapper, tqdm.tqdm(total=1))
else:
cb = callback
if first_search:
logging.info('Starting first search.')
if settings['experiment']['database_path'] is not None:
settings = parallel_execute(settings, wrapped_partial(alphapept.search.search_db, first_search = first_search), callback = cb)
db_data = alphapept.fasta.read_database(settings['experiment']['database_path'])
fasta_dict = db_data['fasta_dict'].item()
pept_dict = db_data['pept_dict'].item()
else:
ms_files = []
for _ in settings['experiment']['file_paths']:
base, ext = os.path.splitext(_)
ms_files.append(base + '.ms_data.hdf')
fasta_dict = alphapept.search.search_parallel(
settings,
callback=cb
)
pept_dict = None
logging.info('First search complete.')
else:
logging.info('Starting second search with DB.')
if settings['experiment']['database_path'] is not None:
settings = parallel_execute(settings, wrapped_partial(alphapept.search.search_db, first_search = first_search), callback = cb)
db_data = alphapept.fasta.read_database(settings['experiment']['database_path'])
fasta_dict = db_data['fasta_dict'].item()
pept_dict = db_data['pept_dict'].item()
else:
ms_files = []
for _ in settings['experiment']['file_paths']:
base, ext = os.path.splitext(_)
ms_files.append(base + '.ms_data.hdf')
try:
offsets = [
alphapept.io.MS_Data_File(
ms_file_name
).read(
dataset_name="corrected_mass",
group_name="features",
attr_name="estimated_max_precursor_ppm"
) * settings['search']['calibration_std_prec'] for ms_file_name in ms_files
]
except KeyError:
logging.info('No calibration found.')
offsets = None
try:
frag_tols = [float(
alphapept.io.MS_Data_File(
ms_file_name
).read(dataset_name="estimated_max_fragment_ppm")[0] * settings['search']['calibration_std_prec']) for ms_file_name in ms_files
]
except KeyError:
logging.info('Fragment tolerance not calibrated found.')
frag_tols = None
logging.info('Starting second search.')
fasta_dict = alphapept.search.search_parallel(
settings,
calibration=offsets,
fragment_calibration=frag_tols,
callback=cb
)
pept_dict = None
logging.info('Second search complete.')
return settings, pept_dict, fasta_dict
# Cell
def recalibrate_data(
settings: dict,
logger_set: bool = False,
settings_parsed: bool = False,
callback: callable = None
) -> dict:
"""Recalibrate mz values.
Args:
        settings (dict): A dictionary with settings for how to process the data.
logger_set (bool): If False, reset the default logger. Defaults to False.
settings_parsed (bool): If True, reparse the settings. Defaults to False.
callback (callable): A function that accepts a float between 0 and 1 as progress. Defaults to None.
Returns:
dict: the parsed settings.
"""
if not logger_set:
set_logger()
if not settings_parsed:
settings = check_version_and_hardware(settings)
import alphapept.recalibration
if settings['search']['calibrate']:
if not callback:
cb = functools.partial(tqdm_wrapper, tqdm.tqdm(total=1))
else:
cb = callback
settings = parallel_execute(settings, alphapept.recalibration.calibrate_hdf, callback = cb)
return settings
# Cell
def score(
settings: dict,
pept_dict: dict = None,
fasta_dict: dict = None,
logger_set: bool = False,
settings_parsed: bool = False,
callback: callable = None
) -> dict:
"""Score PSMs and calculate FDR.
Args:
        settings (dict): A dictionary with settings for how to process the data.
pept_dict (dict): A dictionary with peptides. Defaults to None.
fasta_dict (dict): A dictionary with fasta sequences. Defaults to None.
logger_set (bool): If False, reset the default logger. Defaults to False.
settings_parsed (bool): If True, reparse the settings. Defaults to False.
callback (callable): A function that accepts a float between 0 and 1 as progress. Defaults to None.
Returns:
dict: the parsed settings.
"""
if not logger_set:
set_logger()
if not settings_parsed:
settings = check_version_and_hardware(settings)
import alphapept.score
import alphapept.fasta
if not callback:
cb = functools.partial(tqdm_wrapper, tqdm.tqdm(total=1))
else:
cb = callback
if fasta_dict is None:
db_data = alphapept.fasta.read_database(
settings['experiment']['database_path']
)
fasta_dict = db_data['fasta_dict'].item()
pept_dict = db_data['pept_dict'].item()
settings = parallel_execute(settings, alphapept.score.score_hdf, callback = cb)
return settings
# Cell
def isobaric_labeling(
settings: dict,
logger_set: bool = False,
settings_parsed: bool = False,
callback: callable = None
) -> dict:
"""Search for isobaric labels.
Args:
        settings (dict): A dictionary with settings for how to process the data.
logger_set (bool): If False, reset the default logger. Defaults to False.
settings_parsed (bool): If True, reparse the settings. Defaults to False.
callback (callable): A function that accepts a float between 0 and 1 as progress. Defaults to None.
Returns:
dict: the parsed settings.
"""
if 'isobaric_label' in settings:
if settings['isobaric_label']['label'] != 'None':
if not logger_set:
set_logger()
if not settings_parsed:
settings = check_version_and_hardware(settings)
import alphapept.label
if settings['search']['calibrate']:
if not callback:
cb = functools.partial(tqdm_wrapper, tqdm.tqdm(total=1))
else:
cb = callback
settings = parallel_execute(settings, alphapept.label.find_labels, callback = cb)
return settings
# Cell
def protein_grouping(
settings: dict,
pept_dict: dict = None,
fasta_dict: dict = None,
logger_set: bool = False,
settings_parsed: bool = False,
callback: callable = None
) -> dict:
"""Group peptides into proteins.
Args:
settings (dict): A dictionary with settings how to process the data.
pept_dict (dict): A dictionary with peptides. Defaults to None.
fasta_dict (dict): A dictionary with fasta sequences. Defaults to None.
logger_set (bool): If False, reset the default logger. Defaults to False.
settings_parsed (bool): If True, reparse the settings. Defaults to False.
callback (callable): A function that accepts a float between 0 and 1 as progress. Defaults to None.
Returns:
dict: the parsed settings.
"""
if not logger_set:
set_logger()
if not settings_parsed:
settings = check_version_and_hardware(settings)
import alphapept.score
import alphapept.fasta
if not callback:
cb = functools.partial(tqdm_wrapper, tqdm.tqdm(total=1))
else:
cb = callback
if fasta_dict is None:
db_data = alphapept.fasta.read_database(
settings['experiment']['database_path']
)
fasta_dict = db_data['fasta_dict'].item()
pept_dict = db_data['pept_dict'].item()
    if pept_dict is None:  # extracting the peptide dict requires scored search results
pept_dict = alphapept.fasta.pept_dict_from_search(settings)
logging.info(f'Fasta dict with length {len(fasta_dict):,}, Pept dict with length {len(pept_dict):,}')
# Protein groups
logging.info('Extracting protein groups.')
if not callback:
cb = functools.partial(tqdm_wrapper, tqdm.tqdm(total=1))
else:
cb = callback
alphapept.score.protein_grouping_all(settings, pept_dict, fasta_dict, callback=cb)
logging.info('Protein groups complete.')
return settings
# Cell
import pandas as pd
def align(
settings: dict,
logger_set: bool = False,
settings_parsed: bool = False,
callback: callable = None
) -> dict:
"""Align multiple samples.
Args:
        settings (dict): A dictionary with settings for how to process the data.
logger_set (bool): If False, reset the default logger. Defaults to False.
settings_parsed (bool): If True, reparse the settings. Defaults to False.
callback (callable): A function that accepts a float between 0 and 1 as progress. Defaults to None.
Returns:
dict: the parsed settings.
"""
if not logger_set:
set_logger()
if not settings_parsed:
settings = check_version_and_hardware(settings)
import alphapept.matching
alphapept.matching.align_datasets(settings, callback = callback)
return settings
def match(
settings: dict,
logger_set: bool = False,
settings_parsed: bool = False,
callback: callable = None
) -> dict:
"""Match datasets.
Args:
        settings (dict): A dictionary with settings for how to process the data.
logger_set (bool): If False, reset the default logger. Defaults to False.
settings_parsed (bool): If True, reparse the settings. Defaults to False.
callback (callable): A function that accepts a float between 0 and 1 as progress. Defaults to None.
Returns:
dict: the parsed settings.
"""
if not logger_set:
set_logger()
if not settings_parsed:
settings = check_version_and_hardware(settings)
import alphapept.matching
alphapept.matching.match_datasets(settings)
return settings
# Cell
from typing import NamedTuple
def read_label_intensity(df : pd.DataFrame, label: NamedTuple) -> pd.DataFrame:
"""Reads the label intensities from peptides and sums them by protein group.
Args:
df (pd.DataFrame): Table with peptide information.
label (NamedTuple): Label used for the experiment.
Returns:
pd.DataFrame: Summary protein table containing proteins and their intensity for each channel."""
all_channels = []
for channel in label.channels:
_ = df[['protein_group', channel]].groupby('protein_group').sum()
all_channels.append(_)
    protein_table = pd.concat(all_channels, axis=1)
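    # Return the per-protein-group channel intensities described in the docstring.
    return protein_table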
# SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
# coding: utf-8
"""
Adds electrical generators and existing hydro storage units to a base network.
Relevant Settings
-----------------
.. code:: yaml
costs:
year:
USD2013_to_EUR2013:
dicountrate:
emission_prices:
electricity:
max_hours:
marginal_cost:
capital_cost:
conventional_carriers:
co2limit:
extendable_carriers:
include_renewable_capacities_from_OPSD:
estimate_renewable_capacities_from_capacity_stats:
load:
scaling_factor:
renewable:
hydro:
carriers:
hydro_max_hours:
hydro_capital_cost:
lines:
length_factor:
.. seealso::
Documentation of the configuration file ``config.yaml`` at :ref:`costs_cf`,
:ref:`electricity_cf`, :ref:`load_cf`, :ref:`renewable_cf`, :ref:`lines_cf`
Inputs
------
- ``data/costs.csv``: The database of cost assumptions for all included technologies for specific years from various sources; e.g. discount rate, lifetime, investment (CAPEX), fixed operation and maintenance (FOM), variable operation and maintenance (VOM), fuel costs, efficiency, carbon-dioxide intensity.
- ``data/bundle/hydro_capacities.csv``: Hydropower plant store/discharge power capacities, energy storage capacity, and average hourly inflow by country.
.. image:: ../img/hydrocapacities.png
:scale: 34 %
- ``data/geth2015_hydro_capacities.csv``: alternative to capacities above; not currently used!
- ``resources/opsd_load.csv`` Hourly per-country load profiles.
- ``resources/regions_onshore.geojson``: confer :ref:`busregions`
- ``resources/nuts3_shapes.geojson``: confer :ref:`shapes`
- ``resources/powerplants.csv``: confer :ref:`powerplants`
- ``resources/profile_{}.nc``: all technologies in ``config["renewables"].keys()``, confer :ref:`renewableprofiles`.
- ``networks/base.nc``: confer :ref:`base`
Outputs
-------
- ``networks/elec.nc``:
.. image:: ../img/elec.png
:scale: 33 %
Description
-----------
The rule :mod:`add_electricity` ties all the different data inputs from the preceding rules together into a detailed PyPSA network that is stored in ``networks/elec.nc``. It includes:
- today's transmission topology and transfer capacities (optionally including lines which are under construction according to the config settings ``lines: under_construction`` and ``links: under_construction``),
- today's thermal and hydro power generation capacities (for the technologies listed in the config setting ``electricity: conventional_carriers``), and
- today's load time-series (upsampled in a top-down approach according to population and gross domestic product)
It further adds extendable ``generators`` with **zero** capacity for
- photovoltaic, onshore and AC- as well as DC-connected offshore wind installations with today's locational, hourly wind and solar capacity factors (but **no** current capacities),
- additional open- and combined-cycle gas turbines (if ``OCGT`` and/or ``CCGT`` is listed in the config setting ``electricity: extendable_carriers``)
"""
import logging
from _helpers import configure_logging, update_p_nom_max
import pypsa
import pandas as pd
import numpy as np
import xarray as xr
import geopandas as gpd
import powerplantmatching as pm
from powerplantmatching.export import map_country_bus
from vresutils.costdata import annuity
from vresutils import transfer as vtransfer
idx = pd.IndexSlice
logger = logging.getLogger(__name__)
def normed(s): return s/s.sum()
def _add_missing_carriers_from_costs(n, costs, carriers):
missing_carriers = pd.Index(carriers).difference(n.carriers.index)
if missing_carriers.empty: return
emissions_cols = costs.columns.to_series()\
.loc[lambda s: s.str.endswith('_emissions')].values
suptechs = missing_carriers.str.split('-').str[0]
emissions = costs.loc[suptechs, emissions_cols].fillna(0.)
emissions.index = missing_carriers
n.import_components_from_dataframe(emissions, 'Carrier')
def load_costs(tech_costs, config, elec_config, Nyears=1.):
# set all asset costs and other parameters
costs = pd.read_csv(tech_costs, index_col=list(range(3))).sort_index()
# correct units to MW and EUR
costs.loc[costs.unit.str.contains("/kW"),"value"] *= 1e3
costs.loc[costs.unit.str.contains("USD"),"value"] *= config['USD2013_to_EUR2013']
costs = (costs.loc[idx[:,config['year'],:], "value"]
.unstack(level=2).groupby("technology").sum(min_count=1))
costs = costs.fillna({"CO2 intensity" : 0,
"FOM" : 0,
"VOM" : 0,
"discount rate" : config['discountrate'],
"efficiency" : 1,
"fuel" : 0,
"investment" : 0,
"lifetime" : 25})
costs["capital_cost"] = ((annuity(costs["lifetime"], costs["discount rate"]) +
costs["FOM"]/100.) *
costs["investment"] * Nyears)
costs.at['OCGT', 'fuel'] = costs.at['gas', 'fuel']
costs.at['CCGT', 'fuel'] = costs.at['gas', 'fuel']
costs['marginal_cost'] = costs['VOM'] + costs['fuel'] / costs['efficiency']
costs = costs.rename(columns={"CO2 intensity": "co2_emissions"})
costs.at['OCGT', 'co2_emissions'] = costs.at['gas', 'co2_emissions']
costs.at['CCGT', 'co2_emissions'] = costs.at['gas', 'co2_emissions']
costs.at['solar', 'capital_cost'] = 0.5*(costs.at['solar-rooftop', 'capital_cost'] +
costs.at['solar-utility', 'capital_cost'])
def costs_for_storage(store, link1, link2=None, max_hours=1.):
capital_cost = link1['capital_cost'] + max_hours * store['capital_cost']
if link2 is not None:
capital_cost += link2['capital_cost']
return pd.Series(dict(capital_cost=capital_cost,
marginal_cost=0.,
co2_emissions=0.))
max_hours = elec_config['max_hours']
costs.loc["battery"] = \
costs_for_storage(costs.loc["battery storage"], costs.loc["battery inverter"],
max_hours=max_hours['battery'])
costs.loc["H2"] = \
costs_for_storage(costs.loc["hydrogen storage"], costs.loc["fuel cell"],
costs.loc["electrolysis"], max_hours=max_hours['H2'])
for attr in ('marginal_cost', 'capital_cost'):
overwrites = config.get(attr)
if overwrites is not None:
overwrites = pd.Series(overwrites)
costs.loc[overwrites.index, attr] = overwrites
return costs
def load_powerplants(ppl_fn):
carrier_dict = {'ocgt': 'OCGT', 'ccgt': 'CCGT', 'bioenergy': 'biomass',
'ccgt, thermal': 'CCGT', 'hard coal': 'coal'}
return (pd.read_csv(ppl_fn, index_col=0, dtype={'bus': 'str'})
.powerplant.to_pypsa_names()
.rename(columns=str.lower).drop(columns=['efficiency'])
.replace({'carrier': carrier_dict}))
def attach_load(n, regions, load, nuts3_shapes, countries, scaling=1.):
substation_lv_i = n.buses.index[n.buses['substation_lv']]
regions = (gpd.read_file(regions).set_index('name')
.reindex(substation_lv_i))
opsd_load = (pd.read_csv(load, index_col=0, parse_dates=True)
.filter(items=countries))
logger.info(f"Load data scaled with scalling factor {scaling}.")
opsd_load *= scaling
nuts3 = gpd.read_file(nuts3_shapes).set_index('index')
def upsample(cntry, group):
l = opsd_load[cntry]
if len(group) == 1:
return pd.DataFrame({group.index[0]: l})
else:
nuts3_cntry = nuts3.loc[nuts3.country == cntry]
transfer = vtransfer.Shapes2Shapes(group, nuts3_cntry.geometry,
normed=False).T.tocsr()
gdp_n = pd.Series(transfer.dot(nuts3_cntry['gdp'].fillna(1.).values),
index=group.index)
pop_n = pd.Series(transfer.dot(nuts3_cntry['pop'].fillna(1.).values),
index=group.index)
# relative factors 0.6 and 0.4 have been determined from a linear
# regression on the country to continent load data
factors = normed(0.6 * normed(gdp_n) + 0.4 * normed(pop_n))
return pd.DataFrame(factors.values * l.values[:,np.newaxis],
index=l.index, columns=factors.index)
load = pd.concat([upsample(cntry, group) for cntry, group
in regions.geometry.groupby(regions.country)], axis=1)
n.madd("Load", substation_lv_i, bus=substation_lv_i, p_set=load)
def update_transmission_costs(n, costs, length_factor=1.0, simple_hvdc_costs=False):
# TODO: line length factor of lines is applied to lines and links.
# Separate the function to distinguish.
n.lines['capital_cost'] = (n.lines['length'] * length_factor *
costs.at['HVAC overhead', 'capital_cost'])
if n.links.empty: return
dc_b = n.links.carrier == 'DC'
# If there are no dc links, then the 'underwater_fraction' column
# may be missing. Therefore we have to return here.
if n.links.loc[dc_b].empty: return
if simple_hvdc_costs:
costs = (n.links.loc[dc_b, 'length'] * length_factor *
costs.at['HVDC overhead', 'capital_cost'])
else:
costs = (n.links.loc[dc_b, 'length'] * length_factor *
((1. - n.links.loc[dc_b, 'underwater_fraction']) *
costs.at['HVDC overhead', 'capital_cost'] +
n.links.loc[dc_b, 'underwater_fraction'] *
costs.at['HVDC submarine', 'capital_cost']) +
costs.at['HVDC inverter pair', 'capital_cost'])
n.links.loc[dc_b, 'capital_cost'] = costs
def attach_wind_and_solar(n, costs, input_profiles, technologies, line_length_factor=1):
# TODO: rename tech -> carrier, technologies -> carriers
for tech in technologies:
if tech == 'hydro': continue
n.add("Carrier", name=tech)
with xr.open_dataset(getattr(input_profiles, 'profile_' + tech)) as ds:
if ds.indexes['bus'].empty: continue
suptech = tech.split('-', 2)[0]
if suptech == 'offwind':
underwater_fraction = ds['underwater_fraction'].to_pandas()
connection_cost = (line_length_factor *
ds['average_distance'].to_pandas() *
(underwater_fraction *
costs.at[tech + '-connection-submarine', 'capital_cost'] +
(1. - underwater_fraction) *
costs.at[tech + '-connection-underground', 'capital_cost']))
capital_cost = (costs.at['offwind', 'capital_cost'] +
costs.at[tech + '-station', 'capital_cost'] +
connection_cost)
logger.info("Added connection cost of {:0.0f}-{:0.0f} Eur/MW/a to {}"
.format(connection_cost.min(), connection_cost.max(), tech))
else:
capital_cost = costs.at[tech, 'capital_cost']
n.madd("Generator", ds.indexes['bus'], ' ' + tech,
bus=ds.indexes['bus'],
carrier=tech,
p_nom_extendable=True,
p_nom_max=ds['p_nom_max'].to_pandas(),
weight=ds['weight'].to_pandas(),
marginal_cost=costs.at[suptech, 'marginal_cost'],
capital_cost=capital_cost,
efficiency=costs.at[suptech, 'efficiency'],
p_max_pu=ds['profile'].transpose('time', 'bus').to_pandas())
def attach_conventional_generators(n, costs, ppl, carriers):
_add_missing_carriers_from_costs(n, costs, carriers)
ppl = (ppl.query('carrier in @carriers').join(costs, on='carrier')
.rename(index=lambda s: 'C' + str(s)))
logger.info('Adding {} generators with capacities [MW] \n{}'
.format(len(ppl), ppl.groupby('carrier').p_nom.sum()))
n.madd("Generator", ppl.index,
carrier=ppl.carrier,
bus=ppl.bus,
p_nom=ppl.p_nom,
efficiency=ppl.efficiency,
marginal_cost=ppl.marginal_cost,
capital_cost=0)
logger.warning(f'Capital costs for conventional generators put to 0 EUR/MW.')
def attach_hydro(n, costs, ppl, profile_hydro, hydro_capacities, carriers, **config):
_add_missing_carriers_from_costs(n, costs, carriers)
ppl = ppl.query('carrier == "hydro"').reset_index(drop=True)\
.rename(index=lambda s: str(s) + ' hydro')
ror = ppl.query('technology == "Run-Of-River"')
phs = ppl.query('technology == "Pumped Storage"')
hydro = ppl.query('technology == "Reservoir"')
country = ppl['bus'].map(n.buses.country).rename("country")
inflow_idx = ror.index.union(hydro.index)
if not inflow_idx.empty:
dist_key = ppl.loc[inflow_idx, 'p_nom'].groupby(country).transform(normed)
with xr.open_dataarray(profile_hydro) as inflow:
            inflow_countries = pd.Index(country[inflow_idx])
from typing import NamedTuple
import numpy as np
from numpy import linspace
from numpy.random import RandomState
from numpy.testing import assert_allclose, assert_equal
import pandas as pd
from pandas.testing import assert_frame_equal, assert_series_equal
import pytest
import scipy.stats as stats
from arch.bootstrap import (
CircularBlockBootstrap,
MovingBlockBootstrap,
StationaryBootstrap,
)
from arch.bootstrap.multiple_comparison import MCS, SPA, StepM
class SPAData(NamedTuple):
rng: RandomState
k: int
t: int
benchmark: np.ndarray
models: np.ndarray
index: pd.DatetimeIndex
benchmark_series: pd.Series
benchmark_df: pd.DataFrame
models_df: pd.DataFrame
@pytest.fixture()
def spa_data():
rng = RandomState(23456)
fixed_rng = stats.chi2(10)
t = 1000
k = 500
benchmark = fixed_rng.rvs(t)
models = fixed_rng.rvs((t, k))
index = pd.date_range("2000-01-01", periods=t)
benchmark_series = pd.Series(benchmark, index=index)
benchmark_df = pd.DataFrame(benchmark, index=index)
models_df = pd.DataFrame(models, index=index)
return SPAData(
rng, k, t, benchmark, models, index, benchmark_series, benchmark_df, models_df
)
def test_equivalence(spa_data):
spa = SPA(spa_data.benchmark, spa_data.models, block_size=10, reps=100)
spa.seed(23456)
spa.compute()
numpy_pvalues = spa.pvalues
spa = SPA(spa_data.benchmark_df, spa_data.models_df, block_size=10, reps=100)
spa.seed(23456)
spa.compute()
pandas_pvalues = spa.pvalues
assert_series_equal(numpy_pvalues, pandas_pvalues)
def test_variances_and_selection(spa_data):
adj_models = spa_data.models + linspace(-2, 0.5, spa_data.k)
spa = SPA(spa_data.benchmark, adj_models, block_size=10, reps=10)
spa.seed(23456)
spa.compute()
variances = spa._loss_diff_var
loss_diffs = spa._loss_diff
demeaned = spa._loss_diff - loss_diffs.mean(0)
t = loss_diffs.shape[0]
kernel_weights = np.zeros(t)
p = 1 / 10.0
for i in range(1, t):
kernel_weights[i] = ((1.0 - (i / t)) * ((1 - p) ** i)) + (
(i / t) * ((1 - p) ** (t - i))
)
direct_vars = (demeaned ** 2).sum(0) / t
for i in range(1, t):
direct_vars += (
2 * kernel_weights[i] * (demeaned[: t - i, :] * demeaned[i:, :]).sum(0) / t
)
assert_allclose(direct_vars, variances)
selection_criteria = -1.0 * np.sqrt((direct_vars / t) * 2 * np.log(np.log(t)))
valid = loss_diffs.mean(0) >= selection_criteria
assert_equal(valid, spa._valid_columns)
# Bootstrap variances
spa = SPA(spa_data.benchmark, spa_data.models, block_size=10, reps=100, nested=True)
spa.seed(23456)
spa.compute()
spa.reset()
bs = spa.bootstrap.clone(demeaned)
variances = spa._loss_diff_var
bootstrap_variances = t * bs.var(lambda x: x.mean(0), reps=100, recenter=True)
assert_allclose(bootstrap_variances, variances)
def test_pvalues_and_critvals(spa_data):
spa = SPA(spa_data.benchmark, spa_data.models, reps=100)
spa.compute()
spa.seed(23456)
simulated_vals = spa._simulated_vals
max_stats = np.max(simulated_vals, 0)
max_loss_diff = np.max(spa._loss_diff.mean(0), 0)
pvalues = np.mean(max_loss_diff <= max_stats, 0)
pvalues = pd.Series(pvalues, index=["lower", "consistent", "upper"])
assert_series_equal(pvalues, spa.pvalues)
crit_vals = np.percentile(max_stats, 90.0, axis=0)
crit_vals = pd.Series(crit_vals, index=["lower", "consistent", "upper"])
assert_series_equal(spa.critical_values(0.10), crit_vals)
def test_errors(spa_data):
spa = SPA(spa_data.benchmark, spa_data.models, reps=100)
with pytest.raises(RuntimeError):
spa.pvalues
with pytest.raises(RuntimeError):
spa.critical_values()
with pytest.raises(RuntimeError):
spa.better_models()
with pytest.raises(ValueError):
SPA(spa_data.benchmark, spa_data.models, bootstrap="unknown")
spa.compute()
with pytest.raises(ValueError):
spa.better_models(pvalue_type="unknown")
with pytest.raises(ValueError):
spa.critical_values(pvalue=1.0)
def test_str_repr(spa_data):
spa = SPA(spa_data.benchmark, spa_data.models)
expected = "SPA(studentization: asymptotic, bootstrap: " + str(spa.bootstrap) + ")"
assert_equal(str(spa), expected)
expected = expected[:-1] + ", ID: " + hex(id(spa)) + ")"
assert_equal(spa.__repr__(), expected)
expected = (
"<strong>SPA</strong>("
+ "<strong>studentization</strong>: asymptotic, "
+ "<strong>bootstrap</strong>: "
+ str(spa.bootstrap)
+ ", <strong>ID</strong>: "
+ hex(id(spa))
+ ")"
)
assert_equal(spa._repr_html_(), expected)
spa = SPA(spa_data.benchmark, spa_data.models, studentize=False, bootstrap="cbb")
expected = "SPA(studentization: none, bootstrap: " + str(spa.bootstrap) + ")"
assert_equal(str(spa), expected)
spa = SPA(
spa_data.benchmark, spa_data.models, nested=True, bootstrap="moving_block"
)
expected = "SPA(studentization: bootstrap, bootstrap: " + str(spa.bootstrap) + ")"
assert_equal(str(spa), expected)
def test_seed_reset(spa_data):
spa = SPA(spa_data.benchmark, spa_data.models, reps=10)
spa.seed(23456)
initial_state = spa.bootstrap.random_state
assert_equal(spa.bootstrap._seed, 23456)
spa.compute()
spa.reset()
assert spa._pvalues == {}
assert_equal(spa.bootstrap.random_state, initial_state)
def test_spa_nested(spa_data):
spa = SPA(spa_data.benchmark, spa_data.models, nested=True, reps=100)
spa.compute()
def test_bootstrap_selection(spa_data):
spa = SPA(spa_data.benchmark, spa_data.models, bootstrap="sb")
assert isinstance(spa.bootstrap, StationaryBootstrap)
spa = SPA(spa_data.benchmark, spa_data.models, bootstrap="cbb")
assert isinstance(spa.bootstrap, CircularBlockBootstrap)
spa = SPA(spa_data.benchmark, spa_data.models, bootstrap="circular")
assert isinstance(spa.bootstrap, CircularBlockBootstrap)
spa = SPA(spa_data.benchmark, spa_data.models, bootstrap="mbb")
assert isinstance(spa.bootstrap, MovingBlockBootstrap)
spa = SPA(spa_data.benchmark, spa_data.models, bootstrap="moving block")
assert isinstance(spa.bootstrap, MovingBlockBootstrap)
def test_single_model(spa_data):
spa = SPA(spa_data.benchmark, spa_data.models[:, 0])
spa.compute()
spa = SPA(spa_data.benchmark_series, spa_data.models_df.iloc[:, 0])
spa.compute()
class TestStepM(object):
@classmethod
def setup_class(cls):
cls.rng = RandomState(23456)
fixed_rng = stats.chi2(10)
cls.t = t = 1000
cls.k = k = 500
cls.benchmark = fixed_rng.rvs(t)
cls.models = fixed_rng.rvs((t, k))
index = | pd.date_range("2000-01-01", periods=t) | pandas.date_range |
#web framework
import streamlit as st
#reading the data frame
import pandas as pd
#handling with data matrices
import numpy as np
#encode results from the tuned hyperparameters as a csv file
import base64
import plotly.graph_objects as go
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.model_selection import GridSearchCV
from sklearn.datasets import load_diabetes
#Page Layout
st.set_page_config('Machine Learning Hyperparameter Optimization App', layout='wide')
#RandomForestRegression Edition
st.write("""#Machine Learning Hyperparameter Optimization App#
**(Regression Edition)**
In this implementation , the *RandomForestRegressor()* function is used in this application to build a
regression model using the Random Forest Machine Learning algorithm
""")
# Sidebar - Collects user input features into a dataframe
st.sidebar.header('Upload your CSV data')
uploaded_file = st.sidebar.file_uploader("Upload your input CSV file", type=["csv"])
st.sidebar.markdown("""
[Example CSV input file](https://raw.githubusercontent.com/dataprofessor/data/master/delaney_solubility_with_descriptors.csv)
""")
# Sidebar - Specify parameter settings
st.sidebar.header('Set Parameters')
split_size = st.sidebar.slider('Data split ratio (% for Training Set)', 10, 90, 80, 5)
st.sidebar.subheader('Learning Parameters')
parameter_n_estimators = st.sidebar.slider('Number of estimators (n_estimators)', 0, 500, (10,50), 50)
parameter_n_estimators_step = st.sidebar.number_input('Step size for n_estimators', 10)
st.sidebar.write('---')
parameter_max_features = st.sidebar.slider('Max features (max_features)', 1, 50, (1,3), 1)
st.sidebar.number_input('Step size for max_features', 1)
st.sidebar.write('---')
parameter_min_samples_split = st.sidebar.slider('Minimum number of samples required to split an internal node (min_samples_split)', 1, 10, 2, 1)
parameter_min_samples_leaf = st.sidebar.slider('Minimum number of samples required to be at a leaf node (min_samples_leaf)', 1, 10, 2, 1)
st.sidebar.subheader('General Parameters')
parameter_random_state = st.sidebar.slider('Seed number (random_state)', 0, 1000, 42, 1)
parameter_criterion = st.sidebar.select_slider('Performance measure (criterion)', options=['mse', 'mae'])
parameter_bootstrap = st.sidebar.select_slider('Bootstrap samples when building trees (bootstrap)', options=[True, False])
parameter_oob_score = st.sidebar.select_slider('Whether to use out-of-bag samples to estimate the R^2 on unseen data (oob_score)', options=[False, True])
parameter_n_jobs = st.sidebar.select_slider('Number of jobs to run in parallel (n_jobs)', options=[1, -1])
n_estimators_range = np.arange(parameter_n_estimators[0], parameter_n_estimators[1]+parameter_n_estimators_step, parameter_n_estimators_step)
max_features_range = np.arange(parameter_max_features[0], parameter_max_features[1]+1, 1)
param_grid = dict(max_features=max_features_range, n_estimators=n_estimators_range)
# Main panel
# Displays the dataset
st.subheader('Dataset')
# Model building
def filedownload(df):
csv = df.to_csv(index=False)
# strings <-> bytes conversions
b64 = base64.b64encode(csv.encode()).decode()
href = f'<a href="data:file/csv;base64,{b64}" download="model_performance.csv">Download CSV File</a>'
return href
def build_model(df):
X = df.iloc[:, :-1] # Using all column except for the last column as X
Y = df.iloc[:, -1] # Selecting the last column as Y
st.markdown(
'A model is being built to predict the following **Y** variable:')
st.info(Y.name)
# Data splitting
X_train, X_test, Y_train, Y_test = train_test_split(
X, Y, test_size=split_size)
#X_train.shape, Y_train.shape
#X_test.shape, Y_test.shape
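# Note: the slider range tuples are passed to the constructor below, but GridSearchCV
# overrides n_estimators and max_features via param_grid, so the tuples are never used for fitting.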
rf = RandomForestRegressor(n_estimators=parameter_n_estimators,
random_state=parameter_random_state,
max_features=parameter_max_features,
criterion=parameter_criterion,
min_samples_split=parameter_min_samples_split,
min_samples_leaf=parameter_min_samples_leaf,
bootstrap=parameter_bootstrap,
oob_score=parameter_oob_score,
n_jobs=parameter_n_jobs)
grid = GridSearchCV(estimator=rf, param_grid=param_grid, cv=5)
grid.fit(X_train, Y_train)
st.subheader('Model Performance')
Y_pred_test = grid.predict(X_test)
st.write('Coefficient of determination ($R^2$):')
st.info(r2_score(Y_test, Y_pred_test))
    st.write('Mean squared error (MSE):')
st.info(mean_squared_error(Y_test, Y_pred_test))
st.write("The best parameters are %s with a score of %0.2f"
% (grid.best_params_, grid.best_score_))
st.subheader('Model Parameters')
st.write(grid.get_params())
#-----Process grid data-----#
#combine the grid parameter combinations with their mean test scores
#(reconstructed from a truncated source line; the "R2" column name is assumed, reflecting GridSearchCV's default scoring for regressors)
grid_results = pd.concat([pd.DataFrame(grid.cv_results_["params"]), pd.DataFrame(grid.cv_results_["mean_test_score"], columns=["R2"])], axis=1)
from __future__ import division #brings in Python 3.0 mixed type calculation rules
import datetime
import inspect
import numpy as np
import numpy.testing as npt
import os.path
import pandas as pd
import sys
from tabulate import tabulate
import unittest
##find parent directory and import model
#parentddir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
#sys.path.append(parentddir)
from ..agdrift_exe import Agdrift
test = {}
class TestAgdrift(unittest.TestCase):
"""
Agdrift unit tests.
"""
def setUp(self):
"""
setup the test as needed
e.g. pandas to open agdrift qaqc csv
Read qaqc csv and create pandas DataFrames for inputs and expected outputs
:return:
"""
pass
def tearDown(self):
"""
teardown called after each test
e.g. maybe write test results to some text file
:return:
"""
pass
def create_agdrift_object(self):
# create empty pandas dataframes to create empty object for testing
df_empty = pd.DataFrame()
# create an empty agdrift object
agdrift_empty = Agdrift(df_empty, df_empty)
return agdrift_empty
def test_validate_sim_scenarios(self):
"""
:description determines if user defined scenarios are valid for processing
:param application_method: type of Tier I application method employed
:param aquatic_body_type: type of endpoint of concern (e.g., pond, wetland); implies whether
: endpoint of concern parameters (e.g., pond width) are set (i.e., by user or EPA standard)
:param drop_size_*: qualitative description of spray droplet size for aerial & ground applications
:param boom_height: qualitative height above ground of spray boom
:param airblast_type: type of orchard being sprayed
:NOTE we perform an additional validation check related to distances later in the code just before integration
:return
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
agdrift_empty.out_sim_scenario_chk = pd.Series([], dtype='object')
expected_result = pd.Series([
'Valid Tier I Aquatic Aerial Scenario',
'Valid Tier I Terrestrial Aerial Scenario',
'Valid Tier I Aquatic Aerial Scenario',
'Valid Tier I Terrestrial Aerial Scenario',
'Valid Tier I Aquatic Aerial Scenario',
'Valid Tier I Terrestrial Ground Scenario',
'Valid Tier I Aquatic Ground Scenario',
'Valid Tier I Terrestrial Ground Scenario',
'Valid Tier I Aquatic Ground Scenario',
'Valid Tier I Terrestrial Airblast Scenario',
'Valid Tier I Aquatic Airblast Scenario',
'Valid Tier I Terrestrial Airblast Scenario',
'Valid Tier I Aquatic Airblast Scenario',
'Valid Tier I Terrestrial Airblast Scenario',
'Invalid Tier I Aquatic Aerial Scenario',
'Invalid Tier I Aquatic Ground Scenario',
'Invalid Tier I Aquatic Airblast Scenario',
'Invalid Tier I Terrestrial Aerial Scenario',
'Valid Tier I Terrestrial Ground Scenario',
'Valid Tier I Terrestrial Airblast Scenario',
'Invalid scenario ecosystem_type',
'Invalid Tier I Aquatic Assessment application method',
'Invalid Tier I Terrestrial Assessment application method'],dtype='object')
try:
#set test data
agdrift_empty.num_simulations = len(expected_result)
agdrift_empty.application_method = pd.Series(
['tier_1_aerial',
'tier_1_aerial',
'tier_1_aerial',
'tier_1_aerial',
'tier_1_aerial',
'tier_1_ground',
'tier_1_ground',
'tier_1_ground',
'tier_1_ground',
'tier_1_airblast',
'tier_1_airblast',
'tier_1_airblast',
'tier_1_airblast',
'tier_1_airblast',
'tier_1_aerial',
'tier_1_ground',
'tier_1_airblast',
'tier_1_aerial',
'tier_1_ground',
'tier_1_airblast',
'tier_1_aerial',
'Tier II Aerial',
'Tier III Aerial'], dtype='object')
agdrift_empty.ecosystem_type = pd.Series(
['aquatic_assessment',
'terrestrial_assessment',
'aquatic_assessment',
'terrestrial_assessment',
'aquatic_assessment',
'terrestrial_assessment',
'aquatic_assessment',
'terrestrial_assessment',
'aquatic_assessment',
'terrestrial_assessment',
'aquatic_assessment',
'terrestrial_assessment',
'aquatic_assessment',
'terrestrial_assessment',
'aquatic_assessment',
'aquatic_assessment',
'aquatic_assessment',
'terrestrial_assessment',
'terrestrial_assessment',
'terrestrial_assessment',
'Field Assessment',
'aquatic_assessment',
'terrestrial_assessment'], dtype='object')
agdrift_empty.aquatic_body_type = pd.Series(
['epa_defined_pond',
'NaN',
'epa_defined_wetland',
'NaN',
'user_defined_pond',
'NaN',
'user_defined_wetland',
'NaN',
'epa_defined_wetland',
'NaN',
'user_defined_pond',
'NaN',
'user_defined_wetland',
'NaN',
'Defined Pond',
'user_defined_pond',
'epa_defined_pond',
'NaN',
'NaN',
'NaN',
'epa_defined_pond',
'user_defined_wetland',
'user_defined_pond'], dtype='object')
agdrift_empty.terrestrial_field_type = pd.Series(
['NaN',
'user_defined_terrestrial',
'NaN',
'epa_defined_terrestrial',
'NaN',
'user_defined_terrestrial',
'NaN',
'user_defined_terrestrial',
'NaN',
'epa_defined_terrestrial',
'NaN',
'user_defined_terrestrial',
'NaN',
'user_defined_terrestrial',
'NaN',
'NaN',
'NaN',
'user_defined_terrestrial',
'user_defined_terrestrial',
'user_defined_terrestrial',
'NaN',
'NaN',
'user_defined_terrestrial'], dtype='object')
agdrift_empty.drop_size_aerial = pd.Series(
['very_fine_to_fine',
'fine_to_medium',
'medium_to_coarse',
'coarse_to_very_coarse',
'fine_to_medium',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'medium_to_coarse',
'NaN',
'very_fine_to_medium',
'NaN',
'very_fine Indeed',
'NaN',
'very_fine_to_medium',
'medium_to_coarse',
'NaN'], dtype='object')
agdrift_empty.drop_size_ground = pd.Series(
['NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'very_fine',
'fine_to_medium-coarse',
'very_fine',
'fine_to_medium-coarse',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'very_fine',
'NaN',
'fine_to_medium-coarse',
'very_fine',
'NaN',
'very_fine_to_medium',
'NaN',
'very_fine'], dtype='object')
agdrift_empty.boom_height = pd.Series(
['NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'high',
'low',
'high',
'low',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'high',
'NaN',
'NaN',
'NaN',
'NaN'],dtype='object')
agdrift_empty.airblast_type = pd.Series(
['NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'normal',
'dense',
'sparse',
'orchard',
'vineyard',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'vineyard',
'NaN',
'NaN',
'NaN'], dtype='object')
agdrift_empty.validate_sim_scenarios()
result = agdrift_empty.out_sim_scenario_chk
npt.assert_array_equal(result, expected_result, err_msg="", verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_set_sim_scenario_id(self):
"""
:description provides scenario ids per simulation that match scenario names (i.e., column_names) from SQL database
:param out_sim_scenario_id: scenario name as assigned to individual simulations
:param num_simulations: number of simulations to assign scenario names
:param out_sim_scenario_chk: from previous method where scenarios were checked for validity
:param application_method: application method of scenario
:param drop_size_*: qualitative description of spray droplet size for aerial and ground applications
:param boom_height: qualitative height above ground of spray boom
:param airblast_type: type of airblast application (e.g., vineyard, orchard)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result = pd.Series(['aerial_vf2f',
'aerial_f2m',
'aerial_m2c',
'aerial_c2vc',
'ground_low_vf',
'ground_low_fmc',
'ground_high_vf',
'ground_high_fmc',
'airblast_normal',
'airblast_dense',
'airblast_sparse',
'airblast_vineyard',
'airblast_orchard',
'Invalid'], dtype='object')
try:
agdrift_empty.num_simulations = len(expected_result)
agdrift_empty.out_sim_scenario_chk = pd.Series(['Valid Tier I Aerial',
'Valid Tier I Aerial',
'Valid Tier I Aerial',
'Valid Tier I Aerial',
'Valid Tier I Ground',
'Valid Tier I Ground',
'Valid Tier I Ground',
'Valid Tier I Ground',
'Valid Tier I Airblast',
'Valid Tier I Airblast',
'Valid Tier I Airblast',
'Valid Tier I Airblast',
'Valid Tier I Airblast',
'Invalid Scenario'], dtype='object')
agdrift_empty.application_method = pd.Series(['tier_1_aerial',
'tier_1_aerial',
'tier_1_aerial',
'tier_1_aerial',
'tier_1_ground',
'tier_1_ground',
'tier_1_ground',
'tier_1_ground',
'tier_1_airblast',
'tier_1_airblast',
'tier_1_airblast',
'tier_1_airblast',
'tier_1_airblast',
'tier_1_aerial'], dtype='object')
agdrift_empty.drop_size_aerial = pd.Series(['very_fine_to_fine',
'fine_to_medium',
'medium_to_coarse',
'coarse_to_very_coarse',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN'], dtype='object')
agdrift_empty.drop_size_ground = pd.Series(['NaN',
'NaN',
'NaN',
'NaN',
'very_fine',
'fine_to_medium-coarse',
'very_fine',
'fine_to_medium-coarse',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN'], dtype='object')
agdrift_empty.boom_height = pd.Series(['NaN',
'NaN',
'NaN',
'NaN',
'low',
'low',
'high',
'high',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN'], dtype='object')
agdrift_empty.airblast_type = pd.Series(['NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'normal',
'dense',
'sparse',
'vineyard',
'orchard',
'NaN'], dtype='object')
agdrift_empty.set_sim_scenario_id()
result = agdrift_empty.out_sim_scenario_id
npt.assert_array_equal(result, expected_result, err_msg="", verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_assign_column_names(self):
"""
:description assigns column names (except distance column) from sql database to internal scenario names
:param column_name: short name for pesticide application scenario for which distance vs deposition data is provided
:param scenario_name: internal variable for holding scenario names
:param scenario_number: index for scenario_name (this method assumes the distance values could occur in any column)
:param distance_name: internal name for the column holding distance data
:NOTE to test both outputs of this method I simply appended them together
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
agdrift_empty.scenario_name = pd.Series([], dtype='object')
expected_result = pd.Series(['aerial_vf2f', 'aerial_f2m', 'aerial_m2c', 'aerial_c2vc',
'ground_low_vf', 'ground_low_fmc',
'ground_high_vf', 'ground_high_fmc',
'airblast_normal', 'airblast_dense', 'airblast_sparse',
'airblast_vineyard', 'airblast_orchard'], dtype='object')
try:
agdrift_empty.column_names = pd.Series(['aerial_vf2f', 'aerial_f2m', 'aerial_m2c', 'aerial_c2vc',
'ground_low_vf', 'ground_low_fmc',
'ground_high_vf', 'ground_high_fmc',
'airblast_normal', 'airblast_dense', 'airblast_sparse',
'airblast_vineyard', 'airblast_orchard', 'distance_ft'])
#call method to assign scenario names
agdrift_empty.assign_column_names()
result = agdrift_empty.scenario_name
npt.assert_array_equal(result, expected_result, err_msg="", verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_get_distances(self):
"""
:description retrieves distance values for deposition scenario datasets
: all scenarios use same distances
:param num_db_values: number of distance values to be retrieved
:param distance_name: name of column in sql database that contains the distance values
:NOTE any blank fields are filled with 'nan'
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
location = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
agdrift_empty.db_name = os.path.join(location, 'sqlite_agdrift_distance.db')
agdrift_empty.db_table = 'output'
expected_result = pd.Series([], dtype='float')
try:
expected_result = [0.,0.102525,0.20505,0.4101,0.8202,1.6404,3.2808,4.9212,6.5616,9.8424,13.1232,19.6848,26.2464,
32.808,39.3696,45.9312,52.4928,59.0544,65.616,72.1776,78.7392,85.3008,91.8624,98.424,104.9856,
111.5472,118.1088,124.6704,131.232,137.7936,144.3552,150.9168,157.4784,164.04,170.6016,177.1632,
183.7248,190.2864,196.848,203.4096,209.9712,216.5328,223.0944,229.656,236.2176,242.7792,249.3408,
255.9024,262.464,269.0256,275.5872,282.1488,288.7104,295.272,301.8336,308.3952,314.9568,321.5184,
328.08,334.6416,341.2032,347.7648,354.3264,360.888,367.4496,374.0112,380.5728,387.1344,393.696,
400.2576,406.8192,413.3808,419.9424,426.504,433.0656,439.6272,446.1888,452.7504,459.312,465.8736,
472.4352,478.9968,485.5584,492.12,498.6816,505.2432,511.8048,518.3664,524.928,531.4896,538.0512,
544.6128,551.1744,557.736,564.2976,570.8592,577.4208,583.9824,590.544,597.1056,603.6672,610.2288,
616.7904,623.352,629.9136,636.4752,643.0368,649.5984,656.16,662.7216,669.2832,675.8448,682.4064,
688.968,695.5296,702.0912,708.6528,715.2144,721.776,728.3376,734.8992,741.4608,748.0224,754.584,
761.1456,767.7072,774.2688,780.8304,787.392,793.9536,800.5152,807.0768,813.6384,820.2,826.7616,
833.3232,839.8848,846.4464,853.008,859.5696,866.1312,872.6928,879.2544,885.816,892.3776,898.9392,
905.5008,912.0624,918.624,925.1856,931.7472,938.3088,944.8704,951.432,957.9936,964.5552,971.1168,
977.6784,984.24,990.8016,997.3632]
agdrift_empty.distance_name = 'distance_ft'
agdrift_empty.num_db_values = len(expected_result)
result = agdrift_empty.get_distances(agdrift_empty.num_db_values)
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_get_scenario_deposition_data(self):
"""
:description retrieves deposition data for all scenarios from sql database
: and checks that for each the first, last, and total number of values
: are correct
:param scenario: name of scenario for which data is to be retrieved
:param num_values: number of values included in scenario datasets
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
#scenario_data = pd.Series([[]], dtype='float')
result = pd.Series([], dtype='float')
#changing expected values to the 161st
expected_result = [0.50013,0.041273,161.0, #aerial_vf2f
0.49997,0.011741,161.0, #aerial_f2m
0.4999,0.0053241,161.0, #aerial_m2c
0.49988,0.0031189,161.0, #aerial_c2vc
1.019339,9.66E-04,161.0, #ground_low_vf
1.007885,6.13E-04,161.0, #ground_low_fmc
1.055205,1.41E-03,161.0, #ground_high_vf
1.012828,7.72E-04,161.0, #ground_high_fmc
8.91E-03,3.87E-05,161.0, #airblast_normal
0.1155276,4.66E-04,161.0, #airblast_dense
0.4762651,5.14E-05,161.0, #airblast_sparse
3.76E-02,3.10E-05,161.0, #airblast_vineyard
0.2223051,3.58E-04,161.0] #airblast_orchard
try:
agdrift_empty.num_db_values = 161 #set number of data values in sql db
location = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
agdrift_empty.db_name = os.path.join(location, 'sqlite_agdrift_distance.db')
agdrift_empty.db_table = 'output'
#agdrift_empty.db_name = 'sqlite_agdrift_distance.db'
#this is the list of scenario names (column names) in sql db (the order here is important because
#the expected values are ordered in this manner
agdrift_empty.scenario_name = ['aerial_vf2f', 'aerial_f2m', 'aerial_m2c', 'aerial_c2vc',
'ground_low_vf', 'ground_low_fmc', 'ground_high_vf', 'ground_high_fmc',
'airblast_normal', 'airblast_dense', 'airblast_sparse', 'airblast_vineyard',
'airblast_orchard']
#cycle through reading scenarios and building result list
for i in range(len(agdrift_empty.scenario_name)):
#get scenario data
scenario_data = agdrift_empty.get_scenario_deposition_data(agdrift_empty.scenario_name[i],
agdrift_empty.num_db_values)
print(scenario_data)
#extract 1st and last values of scenario data and build result list (including how many values are
#retrieved for each scenario
if i == 0:
#fix this
result = [scenario_data[0], scenario_data[agdrift_empty.num_db_values - 1],
float(len(scenario_data))]
else:
result.extend([scenario_data[0], scenario_data[agdrift_empty.num_db_values - 1],
float(len(scenario_data))])
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_get_column_names(self):
"""
:description retrieves column names from sql database (sqlite_agdrift_distance.db)
: (each column name refers to a specific deposition scenario;
: the scenario name is used later to retrieve the deposition data)
:parameter output name of sql database table from which to retrieve requested data
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
location = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
agdrift_empty.db_name = os.path.join(location, 'sqlite_agdrift_distance.db')
agdrift_empty.db_table = 'output'
result = pd.Series([], dtype='object')
expected_result = ['distance_ft','aerial_vf2f', 'aerial_f2m', 'aerial_m2c', 'aerial_c2vc',
'ground_low_vf', 'ground_low_fmc', 'ground_high_vf', 'ground_high_fmc',
'airblast_normal', 'airblast_dense', 'airblast_sparse', 'airblast_vineyard',
'airblast_orchard']
try:
result = agdrift_empty.get_column_names()
npt.assert_array_equal(result, expected_result, err_msg="", verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_filter_arrays(self):
"""
:description eliminate blank data cells (i.e., distances for which no deposition value is provided)
(and thus reduce the number of x,y values to be used)
:parameter x_in: array of distance values associated with values for a deposition scenario (e.g., Aerial/EPA Defined Pond)
:parameter y_in: array of deposition values associated with a deposition scenario (e.g., Aerial/EPA Defined Pond)
:parameter x_out: processed array of x_in values eliminating indices of blank distance/deposition values
:parameter y_out: processed array of y_in values eliminating indices of blank distance/deposition values
:NOTE y_in array is assumed to be populated by values >= 0. except for the blanks as 'nan' entries
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result_x = pd.Series([0.,1.,4.,5.,6.,7.], dtype='float')
expected_result_y = pd.Series([10.,11.,14.,15.,16.,17.], dtype='float')
try:
x_in = pd.Series([0.,1.,2.,3.,4.,5.,6.,7.], dtype='float')
y_in = pd.Series([10.,11.,'nan','nan',14.,15.,16.,17.], dtype='float')
x_out, y_out = agdrift_empty.filter_arrays(x_in, y_in)
result_x = x_out
result_y = y_out
npt.assert_allclose(result_x, expected_result_x, rtol=1e-5, atol=0, err_msg='', verbose=True)
npt.assert_allclose(result_y, expected_result_y, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result_x, expected_result_x, result_y, expected_result_y]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_list_sims_per_scenario(self):
"""
:description scan simulations and count number and indices of simulations that apply to each scenario
:parameter num_scenarios number of deposition scenarios included in SQL database
:parameter num_simulations number of simulations included in this model execution
:parameter scenario_name name of deposition scenario as recorded in SQL database
:parameter out_sim_scenario_id identification of deposition scenario specified per model run simulation
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_num_sims = pd.Series([2,2,2,2,2,2,2,2,2,2,2,2,2], dtype='int')
expected_sim_indices = pd.Series([[0,13,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[1,14,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[2,15,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[3,16,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[4,17,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[5,18,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[6,19,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[7,20,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[8,21,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[9,22,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[10,23,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[11,24,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[12,25,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]], dtype='int')
try:
agdrift_empty.scenario_name = pd.Series(['aerial_vf2f', 'aerial_f2m', 'aerial_m2c', 'aerial_c2vc',
'ground_low_vf', 'ground_low_fmc', 'ground_high_vf', 'ground_high_fmc',
'airblast_normal', 'airblast_dense', 'airblast_sparse', 'airblast_vineyard',
'airblast_orchard'], dtype='object')
agdrift_empty.out_sim_scenario_id = pd.Series(['aerial_vf2f', 'aerial_f2m', 'aerial_m2c', 'aerial_c2vc',
'ground_low_vf', 'ground_low_fmc', 'ground_high_vf', 'ground_high_fmc',
'airblast_normal', 'airblast_dense', 'airblast_sparse', 'airblast_vineyard',
'airblast_orchard','aerial_vf2f', 'aerial_f2m', 'aerial_m2c', 'aerial_c2vc',
'ground_low_vf', 'ground_low_fmc', 'ground_high_vf', 'ground_high_fmc',
'airblast_normal', 'airblast_dense', 'airblast_sparse', 'airblast_vineyard',
'airblast_orchard'], dtype='object')
agdrift_empty.num_simulations = len(agdrift_empty.out_sim_scenario_id)
agdrift_empty.num_scenarios = len(agdrift_empty.scenario_name)
result_num_sims, result_sim_indices = agdrift_empty.list_sims_per_scenario()
npt.assert_array_equal(result_num_sims, expected_num_sims, err_msg='', verbose=True)
npt.assert_array_equal(result_sim_indices, expected_sim_indices, err_msg='', verbose=True)
finally:
tab = [result_num_sims, expected_num_sims, result_sim_indices, expected_sim_indices]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_determine_area_dimensions(self):
"""
:description determine relevant area/length/depth of waterbody or terrestrial area
:param i: simulation number
:param ecosystem_type: type of assessment to be conducted
:param aquatic_body_type: source of dimensional data for area (EPA or User defined)
:param terrestrial_field_type: source of dimensional data for area (EPA or User defined)
:param *_width: default or user specified width of waterbody or terrestrial field
:param *_length: default or user specified length of waterbody or terrestrial field
:param *_depth: default or user specified depth of waterbody or terrestrial field
:NOTE all areas, i.e., ponds, wetlands, and terrestrial fields are of 1 hectare size; the user can elect
to specify a width other than the default width but it won't change the area size; thus for
user specified areas the length is calculated and not specified by the user
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_width = pd.Series([208.7, 208.7, 100., 400., 150., 0.], dtype='float')
expected_length = pd.Series([515.8, 515.8, 1076.39, 269.098, 717.593, 0.], dtype='float')
expected_depth = pd.Series([6.56, 0.4921, 7., 23., 0., 0.], dtype='float')
try:
agdrift_empty.ecosystem_type = pd.Series(['aquatic_assessment',
'aquatic_assessment',
'aquatic_assessment',
'aquatic_assessment',
'terrestrial_assessment',
'terrestrial_assessment'], dtype='object')
agdrift_empty.aquatic_body_type = pd.Series(['epa_defined_pond',
'epa_defined_wetland',
'user_defined_pond',
'user_defined_wetland',
'NaN',
'NaN'], dtype='object')
agdrift_empty.terrestrial_field_type = pd.Series(['NaN',
'NaN',
'NaN',
'NaN',
'user_defined_terrestrial',
'epa_defined_terrestrial'], dtype='object')
num_simulations = len(agdrift_empty.ecosystem_type)
agdrift_empty.default_width = 208.7
agdrift_empty.default_length = 515.8
agdrift_empty.default_pond_depth = 6.56
agdrift_empty.default_wetland_depth = 0.4921
agdrift_empty.user_pond_width = pd.Series(['NaN', 'NaN', 100., 'NaN', 'NaN', 'NaN'], dtype='float')
agdrift_empty.user_pond_depth = pd.Series(['NaN', 'NaN', 7., 'NaN', 'NaN', 'NaN'], dtype='float')
agdrift_empty.user_wetland_width = pd.Series(['NaN', 'NaN', 'NaN', 400., 'NaN', 'NaN'], dtype='float')
agdrift_empty.user_wetland_depth = pd.Series(['NaN','NaN', 'NaN', 23., 'NaN', 'NaN'], dtype='float')
agdrift_empty.user_terrestrial_width = pd.Series(['NaN', 'NaN', 'NaN', 'NaN', 150., 'NaN'], dtype='float')
width_result = pd.Series(num_simulations * ['NaN'], dtype='float')
length_result = pd.Series(num_simulations * ['NaN'], dtype='float')
depth_result = pd.Series(num_simulations * ['NaN'], dtype='float')
agdrift_empty.out_area_width = pd.Series(num_simulations * ['nan'], dtype='float')
agdrift_empty.out_area_length = pd.Series(num_simulations * ['nan'], dtype='float')
agdrift_empty.out_area_depth = pd.Series(num_simulations * ['nan'], dtype='float')
agdrift_empty.sqft_per_hectare = 107639
for i in range(num_simulations):
width_result[i], length_result[i], depth_result[i] = agdrift_empty.determine_area_dimensions(i)
npt.assert_allclose(width_result, expected_width, rtol=1e-5, atol=0, err_msg='', verbose=True)
npt.assert_allclose(length_result, expected_length, rtol=1e-5, atol=0, err_msg='', verbose=True)
npt.assert_allclose(depth_result, expected_depth, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [width_result, expected_width, length_result, expected_length, depth_result, expected_depth]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_calc_avg_dep_foa(self):
"""
:description calculation of average deposition over width of water body
:param integration_result result of integration of deposition curve across the distance
: beginning at the near distance and extending to the far distance of the water body
:param integration_distance effectively the width of the water body
:param avg_dep_foa average deposition rate across the width of the water body
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result = pd.Series([0.1538462, 0.5, 240.])
try:
integration_result = pd.Series([1.,125.,3e5], dtype='float')
integration_distance = pd.Series([6.5,250.,1250.], dtype='float')
result = agdrift_empty.calc_avg_dep_foa(integration_result, integration_distance)
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_calc_avg_dep_lbac(self):
"""
Deposition calculation.
:param avg_dep_foa: average deposition over width of water body as fraction of applied
:param application_rate: actual application rate
:param avg_dep_lbac: average deposition over width of water body in lbs per acre
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result = pd.Series([6.5, 3.125e4, 3.75e8])
try:
avg_dep_foa = pd.Series([1.,125.,3e5], dtype='float')
application_rate = pd.Series([6.5,250.,1250.], dtype='float')
result = agdrift_empty.calc_avg_dep_lbac(avg_dep_foa, application_rate)
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_calc_avg_dep_foa_from_lbac(self):
"""
Deposition calculation.
:param avg_dep_foa: average deposition over width of water body as fraction of applied
:param application_rate: actual application rate
:param avg_dep_lbac: average deposition over width of water body in lbs per acre
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result = pd.Series([1.553846e-01, 8.8e-06, 4.e-08])
try:
avg_dep_lbac = pd.Series([1.01, 0.0022, 0.00005], dtype='float')
application_rate = pd.Series([6.5,250.,1250.], dtype='float')
result = agdrift_empty.calc_avg_dep_foa_from_lbac(avg_dep_lbac, application_rate)
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_calc_avg_dep_lbac_from_gha(self):
"""
Deposition calculation.
:param avg_dep_gha: average deposition over width of water body in units of grams/hectare
:param gms_per_lb: conversion factor to convert lbs to grams
:param acres_per_hectare: conversion factor to convert hectares to acres
:param avg_dep_lbac: average deposition over width of water body in lbs per acre
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result = pd.Series([0.01516739, 0.111524, 0.267659])
try:
avg_dep_gha = pd.Series([17., 125., 3e2], dtype='float')
agdrift_empty.gms_per_lb = 453.592
agdrift_empty.acres_per_hectare = 2.471
result = agdrift_empty.calc_avg_dep_lbac_from_gha(avg_dep_gha)
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_calc_avg_dep_lbac_from_waterconc_ngl(self):
"""
:description calculate the average deposition onto the pond/wetland/field
:param avg_dep_lbac: average deposition over width of water body in lbs per acre
:param area_width: average width of water body
:param area_length: average length of water body
:param area_depth: average depth of water body
:param gms_per_lb: conversion factor to convert lbs to grams
:param ng_per_gram conversion factor
:param sqft_per_acre conversion factor
:param liters_per_ft3 conversion factor
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result = pd.Series([2.311455e-05, 2.209479e-03, 2.447423e-03])
try:
avg_waterconc_ngl = pd.Series([17., 125., 3e2], dtype='float')
area_width = pd.Series([50., 200., 500.], dtype='float')
area_length = pd.Series([6331., 538., 215.], dtype='float')
area_depth = pd.Series([0.5, 6.5, 3.], dtype='float')
agdrift_empty.liters_per_ft3 = 28.3168
agdrift_empty.sqft_per_acre = 43560.
agdrift_empty.ng_per_gram = 1.e9
agdrift_empty.gms_per_lb = 453.592
agdrift_empty.acres_per_hectare = 2.471
result = agdrift_empty.calc_avg_dep_lbac_from_waterconc_ngl(avg_waterconc_ngl, area_width,
area_length, area_depth)
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_calc_avg_dep_lbac_from_mgcm2(self):
"""
:description calculate the average deposition of pesticide over the terrestrial field in lbs/acre
:param avg_dep_lbac: average deposition over width of water body in lbs per acre
:param area_depth: average depth of water body
:param gms_per_lb: conversion factor to convert lbs to grams
:param mg_per_gram conversion factor
:param sqft_per_acre conversion factor
:param cm2_per_ft2 conversion factor
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result = pd.Series([2.676538e-02, 2.2304486, 44.608973])
try:
avg_fielddep_mgcm2 = pd.Series([3.e-4, 2.5e-2, 5.e-01])
agdrift_empty.sqft_per_acre = 43560.
agdrift_empty.gms_per_lb = 453.592
agdrift_empty.cm2_per_ft2 = 929.03
agdrift_empty.mg_per_gram = 1.e3
result = agdrift_empty.calc_avg_dep_lbac_from_mgcm2(avg_fielddep_mgcm2)
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_calc_avg_dep_gha(self):
"""
:description average deposition over width of water body in grams per hectare
:param avg_dep_lbac: average deposition over width of water body in lbs per acre
:param gms_per_lb: conversion factor to convert lbs to grams
:param acres_per_hectare: conversion factor to convert acres to hectares
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result = pd.Series([1.401061, 0.3648362, 0.03362546])
try:
avg_dep_lbac = pd.Series([1.25e-3,3.255e-4,3e-5], dtype='float')
agdrift_empty.gms_per_lb = 453.592
agdrift_empty.acres_per_hectare = 2.47105
result = agdrift_empty.calc_avg_dep_gha(avg_dep_lbac)
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_calc_avg_waterconc_ngl(self):
"""
:description calculate the average concentration of pesticide in the pond/wetland
:param avg_dep_lbac: average deposition over width of water body in lbs per acre
:param area_width: average width of water body
:param area_length: average length of water body
:param area_depth: average depth of water body
:param gms_per_lb: conversion factor to convert lbs to grams
:param ng_per_gram conversion factor
:param sqft_per_acre conversion factor
:param liters_per_ft3 conversion factor
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result = pd.Series([70.07119, 18.24654, 22.41823])
try:
avg_dep_lbac = pd.Series([1.25e-3,3.255e-4,3e-5], dtype='float')
area_width = pd.Series([6.56, 208.7, 997.], dtype='float')
area_length = pd.Series([1.640838e4, 515.7595, 107.9629], dtype='float')
area_depth = pd.Series([6.56, 6.56, 0.4921], dtype='float')
agdrift_empty.ng_per_gram = 1.e9
agdrift_empty.liters_per_ft3 = 28.3168
agdrift_empty.gms_per_lb = 453.592
agdrift_empty.sqft_per_acre = 43560.
result = agdrift_empty.calc_avg_waterconc_ngl(avg_dep_lbac ,area_width, area_length, area_depth)
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_calc_avg_fielddep_mgcm2(self):
"""
:description calculate the average deposition of pesticide over the terrestrial field
:param avg_dep_lbac: average deposition over width of water body in lbs per acre
:param area_depth: average depth of water body
:param gms_per_lb: conversion factor to convert lbs to grams
:param mg_per_gram conversion factor
:param sqft_per_acre conversion factor
:param cm2_per_ft2 conversion factor
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result = pd.Series([1.401063e-5, 3.648369e-6, 3.362552e-7])
try:
avg_dep_lbac = pd.Series([1.25e-3,3.255e-4,3e-5], dtype='float')
agdrift_empty.gms_per_lb = 453.592
agdrift_empty.sqft_per_acre = 43560.
agdrift_empty.mg_per_gram = 1.e3
agdrift_empty.cm2_per_ft2 = 929.03
result = agdrift_empty.calc_avg_fielddep_mgcm2(avg_dep_lbac)
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_generate_running_avg(self):
"""
:description retrieves distance and deposition data for the first scenario from the sql database and generates a running average
:param num_db_values: number of distance values to be retrieved
:param distance_name: name of column in sql database that contains the distance values
:NOTE any blank fields are filled with 'nan'
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
location = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
agdrift_empty.db_name = os.path.join(location, 'sqlite_agdrift_distance.db')
agdrift_empty.db_table = 'output'
expected_result_x = pd.Series([], dtype='float')
expected_result_y = pd.Series([], dtype='float')
expected_result_npts = pd.Series([], dtype='object')
x_array_in = pd.Series([], dtype='float')
y_array_in = pd.Series([], dtype='float')
x_array_out = pd.Series([], dtype='float')
y_array_out = pd.Series([], dtype='float')
try:
expected_result_x = [0.,0.102525,0.20505,0.4101,0.8202,1.6404,3.2808,4.9212,6.5616,9.8424,13.1232,19.6848,26.2464,
32.808,39.3696,45.9312,52.4928,59.0544,65.616,72.1776,78.7392,85.3008,91.8624,98.424,104.9856,
111.5472,118.1088,124.6704,131.232,137.7936,144.3552,150.9168,157.4784,164.04,170.6016,177.1632,
183.7248,190.2864,196.848,203.4096,209.9712,216.5328,223.0944,229.656,236.2176,242.7792,249.3408,
255.9024,262.464,269.0256,275.5872,282.1488,288.7104,295.272,301.8336,308.3952,314.9568,321.5184,
328.08,334.6416,341.2032,347.7648,354.3264,360.888,367.4496,374.0112,380.5728,387.1344,393.696,
400.2576,406.8192,413.3808,419.9424,426.504,433.0656,439.6272,446.1888,452.7504,459.312,465.8736,
472.4352,478.9968,485.5584,492.12,498.6816,505.2432,511.8048,518.3664,524.928,531.4896,538.0512,
544.6128,551.1744,557.736,564.2976,570.8592,577.4208,583.9824,590.544,597.1056,603.6672,610.2288,
616.7904,623.352,629.9136,636.4752,643.0368,649.5984,656.16,662.7216,669.2832,675.8448,682.4064,
688.968,695.5296,702.0912,708.6528,715.2144,721.776,728.3376,734.8992,741.4608,748.0224,754.584,
761.1456,767.7072,774.2688,780.8304,787.392,793.9536,800.5152,807.0768,813.6384,820.2,826.7616,
833.3232,839.8848,846.4464,853.008,859.5696,866.1312,872.6928,879.2544,885.816,892.3776,898.9392,
905.5008,912.0624,918.624,925.1856,931.7472,938.3088,944.8704,951.432,957.9936,964.5552,971.1168,
977.6784,984.24,990.8016]
expected_result_y = [0.364712246,0.351507467,0.339214283,0.316974687,0.279954504,0.225948786,0.159949625,
0.123048839,0.099781801,0.071666234,0.056352938,0.03860139,0.029600805,0.024150524,
0.020550354,0.01795028,0.015967703,0.014467663,0.013200146,0.01215011,0.011300098,
0.010550085,0.009905072,0.009345065,0.008845057,0.008400051,0.008000046,0.007635043,
0.007300039,0.007000034,0.006725033,0.00646503,0.006230027,0.006010027,0.005805023,
0.005615023,0.005435021,0.00527002,0.00511002,0.004960017,0.004820017,0.004685016,
0.004560015,0.004440015,0.004325013,0.004220012,0.004120012,0.004020012,0.003925011,
0.003835011,0.00375001,0.00367001,0.00359001,0.00351001,0.003435009,0.003365009,
0.003300007,0.003235009,0.003170007,0.003110007,0.003055006,0.003000007,0.002945006,
0.002895006,0.002845006,0.002795006,0.002745006,0.002695006,0.002650005,0.002610005,
0.002570005,0.002525006,0.002485004,0.002450005,0.002410005,0.002370005,0.002335004,
0.002300005,0.002265004,0.002235004,0.002205004,0.002175004,0.002145004,0.002115004,
0.002085004,0.002055004,0.002025004,0.002000002,0.001975004,0.001945004,0.001920002,
0.001900002,0.001875004,0.001850002,0.001830002,0.001805004,0.001780002,0.001760002,
0.001740002,0.001720002,0.001700002,0.001680002,0.001660002,0.001640002,0.001620002,
0.001605001,0.001590002,0.001570002,0.001550002,0.001535001,0.001520002,0.001500002,
0.001485001,0.001470002,0.001455001,0.001440002,0.001425001,0.001410002,0.001395001,
0.001385001,0.001370002,0.001355001,0.001340002,0.001325001,0.001315001,0.001305001,
0.001290002,0.001275001,0.001265001,0.001255001,0.001245001,0.001230002,0.001215001,
0.001205001,0.001195001,0.001185001,0.001175001,0.001165001,0.001155001,0.001145001,
0.001135001,0.001125001,0.001115001,0.001105001,0.001095001,0.001085001,0.001075001,
0.001065001,0.00106,0.001055001,0.001045001,0.001035001,0.001025001,0.001015001,
0.001005001,0.0009985,0.000993001,0.000985001,0.000977001,0.000969501]
expected_result_npts = 160
x_dist = 6.56
agdrift_empty.distance_name = 'distance_ft'
agdrift_empty.scenario_name = 'ground_low_vf'
agdrift_empty.num_db_values = 161
x_array_in = agdrift_empty.get_distances(agdrift_empty.num_db_values)
y_array_in = agdrift_empty.get_scenario_deposition_data(agdrift_empty.scenario_name, agdrift_empty.num_db_values)
x_array_out, y_array_out, npts_out = agdrift_empty.generate_running_avg(agdrift_empty.num_db_values,
x_array_in, y_array_in, x_dist)
# write output arrays to csv file -- just for debugging
agdrift_empty.write_arrays_to_csv(x_array_out, y_array_out, "output_array_generate.csv")
npt.assert_array_equal(expected_result_npts, npts_out, verbose=True)
npt.assert_allclose(x_array_out, expected_result_x, rtol=1e-5, atol=0, err_msg='', verbose=True)
npt.assert_allclose(y_array_out, expected_result_y, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
pass
tab1 = [x_array_out, expected_result_x]
tab2 = [y_array_out, expected_result_y]
print("\n")
print(inspect.currentframe().f_code.co_name)
print('expected {0} number of points and got {1} points'.format(expected_result_npts, npts_out))
print("x_array result/x_array_expected")
print(tabulate(tab1, headers='keys', tablefmt='rst'))
print("y_array result/y_array_expected")
print(tabulate(tab2, headers='keys', tablefmt='rst'))
return
def test_generate_running_avg1(self):
"""
:description creates a running average for a specified x axis width (e.g., 7-day average values of an array)
:param x_array_in: array of x-axis values
:param y_array_in: array of y-axis values
:param num_db_values: number of points in the input arrays
:param x_array_out: array of x-axis values in output array
:param y_array_out: array of y-axis values in output array
:param npts_out: number of points in the output array
:param x_dist: width in x_axis units of running weighted average
:param num_db_values: number of distance values to be retrieved
:param distance_name: name of column in sql database that contains the distance values
:NOTE This test uses a uniformly spaced x_array and monotonically increasing y_array
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result_x = pd.Series([], dtype='float')
expected_result_y = pd.Series([], dtype='float')
expected_result_npts = pd.Series([], dtype='object')
x_array_in = pd.Series([], dtype='float')
y_array_in = pd.Series([], dtype='float')
x_array_out = pd.Series([], dtype='float')
y_array_out = pd.Series([], dtype='float')
try:
expected_result_x = [0.,1.,2.,3.,4.,5.,6.,7.,8.,9.,10.,
11.,12.,13.,14.,15.,16.,17.,18.,19.,20.,
21.,22.,23.,24.,25.,26.,27.,28.,29.,30.,
31.,32.,33.,34.,35.,36.,37.,38.,39.,40.,
41.,42.,43.,44.]
expected_result_y = [2.5,3.5,4.5,5.5,6.5,7.5,8.5,9.5,10.5,11.5,
12.5,13.5,14.5,15.5,16.5,17.5,18.5,19.5,20.5,21.5,
22.5,23.5,24.5,25.5,26.5,27.5,28.5,29.5,30.5,31.5,
32.5,33.5,34.5,35.5,36.5,37.5,38.5,39.5,40.5,41.5,
42.5,43.5,44.5,45.5, 46.5]
expected_result_npts = 45
x_dist = 5.
num_db_values = 51
x_array_in = [0.,1.,2.,3.,4.,5.,6.,7.,8.,9.,10.,
11.,12.,13.,14.,15.,16.,17.,18.,19.,20.,
21.,22.,23.,24.,25.,26.,27.,28.,29.,30.,
31.,32.,33.,34.,35.,36.,37.,38.,39.,40.,
41.,42.,43.,44.,45.,46.,47.,48.,49.,50.]
y_array_in = [0.,1.,2.,3.,4.,5.,6.,7.,8.,9.,10.,
11.,12.,13.,14.,15.,16.,17.,18.,19.,20.,
21.,22.,23.,24.,25.,26.,27.,28.,29.,30.,
31.,32.,33.,34.,35.,36.,37.,38.,39.,40.,
41.,42.,43.,44.,45.,46.,47.,48.,49.,50.]
x_array_out, y_array_out, npts_out = agdrift_empty.generate_running_avg(num_db_values, x_array_in,
y_array_in, x_dist)
npt.assert_array_equal(expected_result_npts, npts_out, verbose=True)
npt.assert_allclose(x_array_out, expected_result_x, rtol=1e-5, atol=0, err_msg='', verbose=True)
npt.assert_allclose(y_array_out, expected_result_y, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
pass
tab1 = [x_array_out, expected_result_x]
tab2 = [y_array_out, expected_result_y]
print("\n")
print(inspect.currentframe().f_code.co_name)
print('expected {0} number of points and got {1} points'.format(expected_result_npts, npts_out))
print(tabulate(tab1, headers='keys', tablefmt='rst'))
print(tabulate(tab2, headers='keys', tablefmt='rst'))
return
def test_generate_running_avg2(self):
"""
:description creates a running average for a specified x axis width (e.g., 7-day average values of an array)
:param x_array_in: array of x-axis values
:param y_array_in: array of y-axis values
:param num_db_values: number of points in the input arrays
:param x_array_out: array of x-axis values in output array
:param y_array_out: array of y-axis values in output array
:param npts_out: number of points in the output array
:param x_dist: width in x_axis units of running weighted average
:param num_db_values: number of distance values to be retrieved
:param distance_name: name of column in sql database that contains the distance values
:NOTE This test uses a non-uniformly spaced x_array and monotonically increasing y_array
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result_x = pd.Series([], dtype='float')
expected_result_y = pd.Series([], dtype='float')
expected_result_npts = pd.Series([], dtype='object')
x_array_in = pd.Series([], dtype='float')
y_array_in = pd.Series([], dtype='float')
x_array_out = pd.Series([], dtype='float')
y_array_out = pd.Series([], dtype='float')
try:
expected_result_x = [0.,1.,2.,3.,4.,5.,6.,7.,8.,9.,10.,
11.5,12.,13.,14.,15.,16.,17.,18.,19.,20.,
21.5,22.,23.,24.,25.,26.,27.,28.,29.,30.,
31.5,32.,33.,34.,35.,36.,37.,38.,39.,40.,
41.5,42.,43.,44.]
expected_result_y = [2.5,3.5,4.5,5.5,6.5,7.5,8.4666667,9.4,10.4,11.4,
12.4,13.975,14.5,15.5,16.5,17.5,18.466666667,19.4,20.4,21.4,
22.4,23.975,24.5,25.5,26.5,27.5,28.46666667,29.4,30.4,31.4,
32.4,33.975,34.5,35.5,36.5,37.5,38.466666667,39.4,40.4,41.4,
42.4,43.975,44.5,45.5, 46.5]
expected_result_npts = 45
x_dist = 5.
agdrift_empty.num_db_values = 51
x_array_in = [0.,1.,2.,3.,4.,5.,6.,7.,8.,9.,10.,
11.5,12.,13.,14.,15.,16.,17.,18.,19.,20.,
21.5,22.,23.,24.,25.,26.,27.,28.,29.,30.,
31.5,32.,33.,34.,35.,36.,37.,38.,39.,40.,
41.5,42.,43.,44.,45.,46.,47.,48.,49.,50.]
y_array_in = [0.,1.,2.,3.,4.,5.,6.,7.,8.,9.,10.,
11.,12.,13.,14.,15.,16.,17.,18.,19.,20.,
21.,22.,23.,24.,25.,26.,27.,28.,29.,30.,
31.,32.,33.,34.,35.,36.,37.,38.,39.,40.,
41.,42.,43.,44.,45.,46.,47.,48.,49.,50.]
x_array_out, y_array_out, npts_out = agdrift_empty.generate_running_avg(agdrift_empty.num_db_values,
x_array_in, y_array_in, x_dist)
npt.assert_array_equal(expected_result_npts, npts_out, verbose=True)
npt.assert_allclose(x_array_out, expected_result_x, rtol=1e-5, atol=0, err_msg='', verbose=True)
npt.assert_allclose(y_array_out, expected_result_y, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
pass
tab1 = [x_array_out, expected_result_x]
tab2 = [y_array_out, expected_result_y]
print("\n")
print(inspect.currentframe().f_code.co_name)
print('expected {0} number of points and got {1} points'.format(expected_result_npts, npts_out))
print(tabulate(tab1, headers='keys', tablefmt='rst'))
print(tabulate(tab2, headers='keys', tablefmt='rst'))
return
def test_generate_running_avg3(self):
"""
:description creates a running average for a specified x axis width (e.g., 7-day average values of an array);
averages reflect weighted average assuming linearity between x points;
average is calculated as the area under the y-curve beginning at each x point and extending out x_dist
divided by x_dist (which yields the weighted average y between the relevant x points)
:param x_array_in: array of x-axis values
:param y_array_in: array of y-axis values
:param num_db_values: number of points in the input arrays
:param x_array_out: array of x-axis values in output array
:param y_array_out: array of y-axis values in output array
:param npts_out: number of points in the output array
:param x_dist: width in x_axis units of running weighted average
:param num_db_values: number of distance values to be retrieved
:param distance_name: name of column in sql database that contains the distance values
:NOTE This test uses a monotonically increasing y_array and inserts a gap in the x values
that is greater than x_dist
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result_x = pd.Series([], dtype='float')
expected_result_y = pd.Series([], dtype='float')
expected_result_npts = pd.Series([], dtype='object')
x_array_in = pd.Series([], dtype='float')
y_array_in = pd.Series([], dtype='float')
x_array_out = pd.Series([], dtype='float')
y_array_out = pd.Series([], dtype='float')
try:
expected_result_x = [0.,1.,2.,3.,4.,5.,6.,7.,16.,17.,18.,19.,20.,
21.,22.,23.,24.,25.,26.,27.,28.,29.,30.,
31.,32.,33.,34.,35.,36.,37.,38.,39.,40.,
41.,42.,43.,44.,45.,46.,47.,48.,49.,50.,51.,52.]
expected_result_y = [2.5,3.5,4.5,5.4111111,6.14444444,6.7,7.07777777,7.277777777,10.5,11.5,
12.5,13.5,14.5,15.5,16.5,17.5,18.5,19.5,20.5,21.5,
22.5,23.5,24.5,25.5,26.5,27.5,28.5,29.5,30.5,31.5,
32.5,33.5,34.5,35.5,36.5,37.5,38.5,39.5,40.5,41.5,
42.5,43.5,44.5,45.5, 46.5]
expected_result_npts = 45
x_dist = 5.
num_db_values = 51
x_array_in = [0.,1.,2.,3.,4.,5.,6.,7.,16.,17.,18.,19.,20.,
21.,22.,23.,24.,25.,26.,27.,28.,29.,30.,
31.,32.,33.,34.,35.,36.,37.,38.,39.,40.,
41.,42.,43.,44.,45.,46.,47.,48.,49.,50.,
51.,52.,53.,54.,55.,56.,57.,58.]
y_array_in = [0.,1.,2.,3.,4.,5.,6.,7.,8.,9.,10.,
11.,12.,13.,14.,15.,16.,17.,18.,19.,20.,
21.,22.,23.,24.,25.,26.,27.,28.,29.,30.,
31.,32.,33.,34.,35.,36.,37.,38.,39.,40.,
41.,42.,43.,44.,45.,46.,47.,48.,49.,50.]
x_array_out, y_array_out, npts_out = agdrift_empty.generate_running_avg(num_db_values, x_array_in,
y_array_in, x_dist)
npt.assert_array_equal(expected_result_npts, npts_out, verbose=True)
npt.assert_allclose(x_array_out, expected_result_x, rtol=1e-5, atol=0, err_msg='', verbose=True)
npt.assert_allclose(y_array_out, expected_result_y, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
pass
tab1 = [x_array_out, expected_result_x]
tab2 = [y_array_out, expected_result_y]
print("\n")
print(inspect.currentframe().f_code.co_name)
print('expected {0} number of points and got {1} points'.format(expected_result_npts, npts_out))
print(tabulate(tab1, headers='keys', tablefmt='rst'))
print(tabulate(tab2, headers='keys', tablefmt='rst'))
return
def test_locate_integrated_avg(self):
"""
:description retrieves values for distance and the first deposition scenario from the sql database
and generates running weighted averages from the first x,y value until it locates the user
specified integrated average of interest
:param num_db_values: number of distance values to be retrieved
:param distance_name: name of column in sql database that contains the distance values
:NOTE any blank fields are filled with 'nan'
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
location = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
agdrift_empty.db_name = os.path.join(location, 'sqlite_agdrift_distance.db')
agdrift_empty.db_table = 'output'
expected_result_x = pd.Series([], dtype='float')
expected_result_y = pd.Series([], dtype='float')
expected_result_npts = pd.Series([], dtype='object')
x_array_in = pd.Series([], dtype='float')
y_array_in = pd.Series([], dtype='float')
x_array_out = pd.Series([], dtype='float')
y_array_out = pd.Series([], dtype='float')
try:
expected_result_x = [0.,0.102525,0.20505,0.4101,0.8202,1.6404,3.2808,4.9212,6.5616,9.8424,13.1232,19.6848,26.2464,
32.808,39.3696,45.9312,52.4928,59.0544,65.616,72.1776,78.7392,85.3008,91.8624,98.424,104.9856,
111.5472,118.1088,124.6704,131.232,137.7936,144.3552,150.9168,157.4784,164.04,170.6016,177.1632,
183.7248,190.2864,196.848,203.4096,209.9712,216.5328,223.0944,229.656,236.2176,242.7792,249.3408,
255.9024,262.464,269.0256,275.5872,282.1488,288.7104,295.272,301.8336,308.3952,314.9568,321.5184,
328.08,334.6416,341.2032,347.7648,354.3264,360.888,367.4496,374.0112,380.5728,387.1344,393.696,
400.2576,406.8192,413.3808,419.9424,426.504,433.0656,439.6272,446.1888,452.7504,459.312,465.8736,
472.4352,478.9968,485.5584,492.12,498.6816,505.2432,511.8048,518.3664,524.928,531.4896,538.0512,
544.6128,551.1744,557.736,564.2976,570.8592,577.4208,583.9824,590.544,597.1056,603.6672,610.2288,
616.7904,623.352,629.9136,636.4752,643.0368,649.5984,656.16,662.7216,669.2832,675.8448,682.4064,
688.968,695.5296,702.0912,708.6528,715.2144,721.776,728.3376,734.8992,741.4608,748.0224,754.584,
761.1456,767.7072,774.2688,780.8304,787.392,793.9536,800.5152,807.0768,813.6384,820.2,826.7616,
833.3232,839.8848,846.4464,853.008,859.5696,866.1312,872.6928,879.2544,885.816,892.3776,898.9392,
905.5008,912.0624,918.624,925.1856,931.7472,938.3088,944.8704,951.432,957.9936,964.5552,971.1168,
977.6784,984.24,990.8016]
expected_result_y = [0.364712246,0.351507467,0.339214283,0.316974687,0.279954504,0.225948786,0.159949625,
0.123048839,0.099781801,0.071666234,0.056352938,0.03860139,0.029600805,0.024150524,
0.020550354,0.01795028,0.015967703,0.014467663,0.013200146,0.01215011,0.011300098,
0.010550085,0.009905072,0.009345065,0.008845057,0.008400051,0.008000046,0.007635043,
0.007300039,0.007000034,0.006725033,0.00646503,0.006230027,0.006010027,0.005805023,
0.005615023,0.005435021,0.00527002,0.00511002,0.004960017,0.004820017,0.004685016,
0.004560015,0.004440015,0.004325013,0.004220012,0.004120012,0.004020012,0.003925011,
0.003835011,0.00375001,0.00367001,0.00359001,0.00351001,0.003435009,0.003365009,
0.003300007,0.003235009,0.003170007,0.003110007,0.003055006,0.003000007,0.002945006,
0.002895006,0.002845006,0.002795006,0.002745006,0.002695006,0.002650005,0.002610005,
0.002570005,0.002525006,0.002485004,0.002450005,0.002410005,0.002370005,0.002335004,
0.002300005,0.002265004,0.002235004,0.002205004,0.002175004,0.002145004,0.002115004,
0.002085004,0.002055004,0.002025004,0.002000002,0.001975004,0.001945004,0.001920002,
0.001900002,0.001875004,0.001850002,0.001830002,0.001805004,0.001780002,0.001760002,
0.001740002,0.001720002,0.001700002,0.001680002,0.001660002,0.001640002,0.001620002,
0.001605001,0.001590002,0.001570002,0.001550002,0.001535001,0.001520002,0.001500002,
0.001485001,0.001470002,0.001455001,0.001440002,0.001425001,0.001410002,0.001395001,
0.001385001,0.001370002,0.001355001,0.001340002,0.001325001,0.001315001,0.001305001,
0.001290002,0.001275001,0.001265001,0.001255001,0.001245001,0.001230002,0.001215001,
0.001205001,0.001195001,0.001185001,0.001175001,0.001165001,0.001155001,0.001145001,
0.001135001,0.001125001,0.001115001,0.001105001,0.001095001,0.001085001,0.001075001,
0.001065001,0.00106,0.001055001,0.001045001,0.001035001,0.001025001,0.001015001,
0.001005001,0.0009985,0.000993001,0.000985001,0.000977001,0.000969501]
expected_result_npts = 160
expected_x_dist_of_interest = 990.8016
x_dist = 6.56
weighted_avg = 0.0009697 #this is the running average value we're looking for
agdrift_empty.distance_name = 'distance_ft'
agdrift_empty.scenario_name = 'ground_low_vf'
agdrift_empty.num_db_values = 161
agdrift_empty.find_nearest_x = True
x_array_in = agdrift_empty.get_distances(agdrift_empty.num_db_values)
y_array_in = agdrift_empty.get_scenario_deposition_data(agdrift_empty.scenario_name, agdrift_empty.num_db_values)
x_array_out, y_array_out, npts_out, x_dist_of_interest, range_chk = \
agdrift_empty.locate_integrated_avg(agdrift_empty.num_db_values, x_array_in, y_array_in, x_dist, weighted_avg)
npt.assert_array_equal(expected_x_dist_of_interest, x_dist_of_interest, verbose=True)
npt.assert_array_equal(expected_result_npts, npts_out, verbose=True)
npt.assert_allclose(x_array_out, expected_result_x, rtol=1e-5, atol=0, err_msg='', verbose=True)
npt.assert_allclose(y_array_out, expected_result_y, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
pass
tab1 = [x_array_out, expected_result_x]
tab2 = [y_array_out, expected_result_y]
print("\n")
print(inspect.currentframe().f_code.co_name)
print('expected {0} x-units to area and got {1} '.format(expected_x_dist_of_interest, x_dist_of_interest))
print('expected {0} number of points and got {1} points'.format(expected_result_npts, npts_out))
print("x_array result/x_array_expected")
print(tabulate(tab1, headers='keys', tablefmt='rst'))
print("y_array result/y_array_expected")
print(tabulate(tab2, headers='keys', tablefmt='rst'))
return
def test_locate_integrated_avg1(self):
"""
:description retrieves values for distance and the first deposition scenario from the sql database
:param num_db_values: number of distance values to be retrieved
:param distance_name: name of column in sql database that contains the distance values
:NOTE this test is for a monotonically increasing function with some irregularity in x-axis points
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result_x = pd.Series([], dtype='float')
expected_result_y = pd.Series([], dtype='float')
x_array_in = pd.Series([], dtype='float')
y_array_in = pd.Series([], dtype='float')
x_array_out = pd.Series([], dtype='float')
y_array_out = | pd.Series([], dtype='float') | pandas.Series |
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 28 08:29:18 2020
@author: <NAME>
"""
import requests
from lxml.html import fromstring
import pandas as pd
import time
import schedule
# Define the task that will be repeated daily
def job(t):
# Define the URL to scrape and include a user agent
url = "https://www.worldometers.info/coronavirus"
header = {
"user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/71.0.3578.80 Chrome/71.0.3578.80 Safari/537.36"
}
req = requests.get(url, headers=header)
# Parse the HTML obtained with requests
base = fromstring(req.text)
# Extract the table of interest; in this case it has id = 'main_table_countries_today'
tabla = base.xpath(".//table[@id='main_table_countries_today']")[0]
# Extract the header rows of the table
encabezado = tabla.xpath(".//thead/tr/th")
# Build the dataset columns
columnas = list()
for col in encabezado:
columnas.append(col.text_content().replace('\n', '').replace(u'\xa0', u'').strip())
# Extract the row values that contain the figures for each column
filas = tabla.xpath(".//tbody/tr")
etiquetas = list()
valores = list()
for fila in filas:
etiqueta = fila.xpath(".//td/a")
if len(etiqueta) > 0:
etiquetas.append(etiqueta[0].get('href').strip().replace('country/', '').replace('/', ''))
valores.append([valor.text_content().strip() for valor in fila.xpath(".//td")])
# Build the dataframe
df = pd.DataFrame(valores, columns=columnas)
# Remove the "+" signs and the commas from the figures so they can be converted to numbers
cols = ['TotalCases','NewCases','TotalDeaths','NewDeaths','TotalRecovered',"NewRecovered","ActiveCases","Serious,Critical",'TotCases/1M pop',"Deaths/1M pop",'TotalTests',
'Tests/1M pop','Population', '1 Caseevery X ppl','1 Deathevery X ppl','1 Testevery X ppl' ]
df[cols] = df[cols].replace({'\+': '', ',': '',"N/A":'' }, regex=True)
# Convert the columns to numeric
df[cols] = df[cols].apply(pd.to_numeric)
df.dtypes
#df.describe().transpose()
# Sort by total number of cases
df = df.sort_values(by=['TotalCases'], ascending=False)
# Add a current-date field to the dataframe
df['Date'] = | pd.to_datetime('today') | pandas.to_datetime |
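# --- Illustrative sketch (not part of the original script): the `schedule` and `time`
# --- imports above suggest job() was meant to run daily; the "10:00" run time and the
# --- value passed as `t` are assumptions.
if __name__ == "__main__":
    schedule.every().day.at("10:00").do(job, t="daily")
    while True:
        schedule.run_pending()
        time.sleep(60)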
import pandas as pd
import pybedtools
import logging, os
def log_message(*message):
"""write message to logfile and stdout"""
if message:
for i in message:
logging.info(i)
print(i)
def query_alias(aliases, gene):
"""check if gene is in list of aliases"""
# NaN guard: NaN != NaN, so this branch is only taken when aliases is an actual list
if aliases == aliases:
aliases = [alias.replace(" ", "") for alias in aliases]
if gene in aliases:
return True
else:
return False
else:
return False
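# Illustrative behaviour of query_alias() (made-up inputs, not from the pipeline):
#   query_alias(["HBD1", "BRCA1 "], "BRCA1")  -> True   (spaces are stripped before matching)
#   query_alias(["HBD1"], "BRCA1")            -> False
#   query_alias(float("nan"), "BRCA1")        -> False  (NaN fails the `aliases == aliases` guard)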
def main(family, hpo, ensembl, refseq, hgnc):
logfile = "logs/hpo_to_panel/genes.log"
logging.basicConfig(
filename=logfile,
filemode="w",
level=logging.DEBUG,
format="%(asctime)s:%(message)s",
datefmt="%Y-%m-%d %H:%M",
)
log_message("Loading gene coordinates and aliases")
hpo = pd.read_csv(hpo, sep="\t", comment="#").set_index("Gene ID")
ensembl = pd.read_csv(ensembl).astype(str).set_index("name")
refseq = pd.read_csv(refseq).astype(str).set_index("name")
hgnc = pd.read_csv(hgnc, sep="\t")
genes = pd.concat([ensembl, refseq])
# join hpo terms and ensembl+refseq genes on gene id (most gene ids in the hpo file are ENSG ids, but not all)
log_message("Mapping HPO genes to gene coordinates")
hpo_coord = genes.join(hpo, how="inner").reset_index()
missing = [gene for gene in hpo.index if gene not in hpo_coord["index"].values]
# check missing genes against HGNC previous symbols and aliases
log_message("Mapping missing genes to HGNC aliases")
hgnc["Alias symbols"] = hgnc["Alias symbols"].apply(
lambda x: x.split(",") if x == x else x
)
hgnc["Previous symbols"] = hgnc["Previous symbols"].apply(
lambda x: x.split(",") if x == x else x
)
alias_list = hgnc["Alias symbols"].dropna().tolist()
previous_list = hgnc["Previous symbols"].dropna().tolist()
alias_prev_symbols = alias_list + previous_list
alias_prev_symbols = [
alias.replace(" ", "") for alias in alias_prev_symbols for alias in alias
]
found = []
for gene in missing:
if gene in alias_prev_symbols:
try:
symbol = hgnc[
hgnc["Alias symbols"].apply(lambda x: query_alias(x, gene))
]["Approved symbol"].values[0]
except IndexError:
symbol = hgnc[
hgnc["Previous symbols"].apply(lambda x: query_alias(x, gene))
]["Approved symbol"].values[0]
# using approved symbol, check refseq genes again
# if gene is in refseq, add coordinates to hpo_coord df
try:
coord = refseq.loc[symbol].copy()
hpo_coord = | pd.concat([hpo_coord, coord], axis=0) | pandas.concat |
from __future__ import annotations
import pytest
from pandas.errors import ParserWarning
import pandas.util._test_decorators as td
from pandas import (
DataFrame,
Series,
to_datetime,
)
import pandas._testing as tm
from pandas.io.xml import read_xml
@pytest.fixture(params=[pytest.param("lxml", marks=td.skip_if_no("lxml")), "etree"])
def parser(request):
return request.param
@pytest.fixture(
params=[None, {"book": ["category", "title", "author", "year", "price"]}]
)
def iterparse(request):
return request.param
def read_xml_iterparse(data, **kwargs):
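    # The literal XML is round-tripped through a temporary file because the iterparse
    # option of read_xml operates on files on disk rather than on in-memory strings.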
with tm.ensure_clean() as path:
with open(path, "w") as f:
f.write(data)
return read_xml(path, **kwargs)
xml_types = """\
<?xml version='1.0' encoding='utf-8'?>
<data>
<row>
<shape>square</shape>
<degrees>00360</degrees>
<sides>4.0</sides>
</row>
<row>
<shape>circle</shape>
<degrees>00360</degrees>
<sides/>
</row>
<row>
<shape>triangle</shape>
<degrees>00180</degrees>
<sides>3.0</sides>
</row>
</data>"""
xml_dates = """<?xml version='1.0' encoding='utf-8'?>
<data>
<row>
<shape>square</shape>
<degrees>00360</degrees>
<sides>4.0</sides>
<date>2020-01-01</date>
</row>
<row>
<shape>circle</shape>
<degrees>00360</degrees>
<sides/>
<date>2021-01-01</date>
</row>
<row>
<shape>triangle</shape>
<degrees>00180</degrees>
<sides>3.0</sides>
<date>2022-01-01</date>
</row>
</data>"""
# DTYPE
def test_dtype_single_str(parser):
df_result = read_xml(xml_types, dtype={"degrees": "str"}, parser=parser)
df_iter = read_xml_iterparse(
xml_types,
parser=parser,
dtype={"degrees": "str"},
iterparse={"row": ["shape", "degrees", "sides"]},
)
df_expected = DataFrame(
{
"shape": ["square", "circle", "triangle"],
"degrees": ["00360", "00360", "00180"],
"sides": [4.0, float("nan"), 3.0],
}
)
| tm.assert_frame_equal(df_result, df_expected) | pandas._testing.assert_frame_equal |
#! /usr/bin/env python
#
"""
Inspired by https://github.com/ufeindt/marshaltools
"""
import json
import requests
import pandas
import os
import warnings
import numpy as np
from . import io
try:
import matplotlib.pyplot as mpl
_HAS_MPL = True
except ImportError:
warnings.warn("cannot import matplotlib (front-end error most likely)")
_HAS_MPL = False
# list of effects:
# list_program_sources.cgi | auth, data={'programidx' : str(programidx)} # list of sources associated to the program
# list_programs.cgi | auth # list of program you belong to
# view_source.cgi | auth, data={'name' : name}
# source_summary.cgi | auth, data={'sourceid' : str(source['id'])}
# add_spec.cgi | auth, data=payload,files=files
# => payload = {'sourceid' : str(source['id']),'spectype':args.spectype,'programid':get_prog_id(args.prog_name),'instrumentid':get_inst_id(telname),
# 'format':fformat,'obsdate':obsdate,'exptime':exptime,'observer':user,'reducedby':reducer,'class':"",'redshift':"",
# 'phase':"",'comment':"",'commit':'yes','submit':'upload the file'}
#
# growth_treasures_transient.cgi?cutprogramidx=%d
#
#marshal_root = 'http://skipper.caltech.edu:8080/cgi-bin/growth/'
#summary_url = marshal_root + 'source_summary.cgi?sourceid=%s'
#listprog_url = marshal_root + 'list_programs.cgi' # list of program you belong to
#scanning_url = marshal_root + 'growth_treasures_transient.cgi'
#saving_url = marshal_root + 'save_cand_growth.cgi?candid=%s&program=%s'
#savedsources_url = marshal_root + 'list_program_sources.cgi'
#rawsaved_url = marshal_root + 'list_sources_bare.cgi'
#annotate_url = marshal_root + 'edit_comment.cgi'
#ingest_url = marshal_root + 'ingest_avro_id.cgi'
#
#
MARSHAL_BASEURL = "http://skipper.caltech.edu:8080/cgi-bin/growth/"
MARSHAL_LC_DEFAULT_SOUCE = "plot_lc"
from .io import LOCALSOURCE
MARSHALSOURCE = os.path.join(LOCALSOURCE,"marshal")
def _account_id_declined_(username, password):
""" This returns True if the login information has been rejected"""
r = requests.post( MARSHAL_BASEURL+'list_programs.cgi', auth=(username, password) )
return "This server could not verify that you" in r.text
#############################
# #
# Stand Alone Functions #
# #
#############################
def convert_lc_tofritz(marshal_lc, name):
""" """
fritz_keys = ['obj_id', 'ra', 'dec', 'filter', 'mjd', 'instrument_id',
'instrument_name', 'ra_unc', 'dec_unc', 'origin', 'id', 'groups',
'altdata', 'mag', 'magerr', 'magsys', 'limiting_mag']
from astropy import time
data = marshal_lc[["filter","mag","emag","limmag"]].rename({"emag":"magerr", "limmag":"limiting_mag"},
axis=1).replace(to_replace=99.0, value=np.NaN)
flag_gri = data["filter"].isin(["g","r","i"])
data.loc[flag_gri,"filter"] = "ztf"+data.loc[flag_gri,"filter"].astype('str')
data["mjd"] = time.Time(marshal_lc["jdobs"].astype("float"), format="jd").mjd
data["obj_id"] = name
data["magsys"] = 'ab'
data["instrument_name"] = marshal_lc["instrument"].str.split("+", expand=True)[1]
for k in ['ra', 'dec','ra_unc', 'dec_unc', 'id', 'groups', 'altdata',
'instrument_id',"origin"]:
data[k] = np.NaN
return data[fritz_keys]
#############################
# #
# Stand Alone Functions #
# #
#############################
def get_target_data(name):
""" provide a name (or list of names) and get its/there marshal information
IMPORTANT: This function is slow, but it takes the same amount of time if you provide 1 or any number of targets.
So it is better to provide a long list of target names at once.
Returns
-------
pandas.DataFrame
"""
m = MarshalAccess()
m.load_target_sources()
return m.get_target_data(name)
def get_target_lightcurve(name, download=True, update=False, as_fritz=False, **kwargs):
""" Get the target lightcurve from the marshal.
Parameters
----------
name: [string]
Target name
download: [bool] -optional-
Should the lightcurve be downloaded if necessary ?
update: [bool] -optional-
Force the re-download of the lightcurve.
Returns
-------
DataFrame
"""
if update:
download_lightcurve(name, overwrite=True, **kwargs)
lc = get_local_lightcurves(name)
if lc is None:
if update:
warnings.warn("Download did not seem successful. Cannot retreive the lightcurve")
return None
elif not download:
warnings.warn(f"No local lightcurve for {name}. download it or set download to true")
return None
lc = get_target_lightcurve(name, update=True, **kwargs)
if as_fritz:
return convert_lc_tofritz(lc, name=name)
return lc
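# Illustrative usage sketch (not part of the original module); the target name
# 'ZTF18abcdefg' is a placeholder and marshal credentials must already be configured.
def _example_get_lightcurve(name="ZTF18abcdefg"):
    """ Fetch a target lightcurve, downloading it on first use, and also in Fritz format. """
    lc = get_target_lightcurve(name, download=True)
    lc_fritz = get_target_lightcurve(name, as_fritz=True)
    return lc, lc_fritz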
def get_target_spectra(name, download=True, update=False, only_sedm=False, **kwargs):
""" Get target spectra from the marshal.
Parameters
----------
name: [string]
Target name
download: [bool] -optional-
Should the spectra be downloaded if necessary ?
update: [bool] -optional-
Force the re-download of the spectra.
Returns
-------
DataFrame
"""
if update:
download_spectra(name, overwrite=True, **kwargs)
spec = get_local_spectra(name, only_sedm=only_sedm)
if spec is None:
if update:
warnings.warn("Download did not seem successful. Cannot retreive the spectra")
return None
elif not download:
warnings.warn(f"No local spectra for {name}. download it or set download to true")
return None
return get_target_spectra(name, update=True, only_sedm=only_sedm, **kwargs)
else:
return spec
# -------------- #
# PLOT LC #
# -------------- #
if _HAS_MPL:
GENERIC = dict(alpha=1, mew=0.4, mec="0.7", ecolor="0.7", ls="None")
PROP = { # ZTF
"ztf:r":dict(marker="o",ms=7, mfc="C3"),
"ztf:g":dict(marker="o",ms=7, mfc="C2"),
"ztf:i":dict(marker="o",ms=7, mfc="C1"),
# Swift
"uvot:B": dict(marker="s", ms=5, mfc="C0"),
"uvot:u": dict(marker="s", ms=5, mfc=mpl.cm.Blues(0.7)),
"uvot:uvm2":dict(marker="s", ms=5, mfc=mpl.cm.Purples(0.6)),
"uvot:uvm2":dict(marker="s", ms=5, mfc=mpl.cm.Purples(0.8)),
"uvot:uvm1":dict(marker="s", ms=5, mfc=mpl.cm.Purples(0.4)),
"uvot:V": dict(marker="s", ms=5, mfc=mpl.cm.Greens(0.9)),
#
"ioo:u": dict(marker="d", ms=6,mfc=mpl.cm.Blues(0.6)),
"ioo:g": dict(marker="d", ms=6,mfc=mpl.cm.Greens(0.6)),
"ioo:r": dict(marker="d", ms=6,mfc=mpl.cm.Reds(0.7)),
"ioo:i": dict(marker="d",ms=6, mfc=mpl.cm.Oranges(0.6)),
"ioo:z": dict(marker="d", ms=6,mfc=mpl.cm.binary(0.8))
}
for v in PROP.values():
for k,v_ in GENERIC.items():
v[k]=v_
def plot_lightcurve(lc_dataframe, savefile=None, ax=None, title=None, show_legend=True):
""" """
import matplotlib.pyplot as mpl
from astropy.time import Time
if ax is None:
fig = mpl.figure(figsize=[7,4])
ax = fig.add_axes([0.1,0.12,0.67,0.8])
else:
fig = ax.figure
lc_dataframe["inst_filter"] = [d.split("+")[-1].replace('"',"").lower()
for d in lc_dataframe["instrument"]+":"+lc_dataframe["filter"]]
if 'magpsf' in lc_dataframe.columns:
keys = {'filter':'inst_filter',
'mag':'magpsf',
'mag.err':'sigmamagpsf',
'upmag':'limmag',
'jdobs':'jdobs'}
else:
keys = {'filter':'inst_filter',
'mag':'mag',
'mag.err':'emag',
'upmag':'limmag',
'jdobs':'jdobs'}
# DataPoints
for filter_ in np.unique(lc_dataframe[keys["filter"]]):
if filter_ not in PROP:
warnings.warn(f"Unknown instrument: {filter_} | magnitude not shown")
continue
jd, mag, magerr = lc_dataframe[lc_dataframe[keys["filter"]].isin([filter_]) &
~lc_dataframe[keys["mag"]].isin([99.00])][
[keys["jdobs"],keys["mag"],keys["mag.err"]]
].values.T
ax.errorbar([Time(jd_, format="jd").datetime for jd_ in jd],
mag, yerr= magerr,
label="%s"%filter_, **PROP[filter_.replace('"',"")])
# Upper Limits
ax.invert_yaxis()
for filter_ in np.unique(lc_dataframe[keys["filter"]]):
if filter_ not in PROP:
warnings.warn(f"Unknown instrument: {filter_} | magnitude not shown")
continue
jdup, upmag = lc_dataframe[lc_dataframe[keys["filter"]].isin([filter_]) &
lc_dataframe[keys["mag"]].isin([99.00])][
[keys["jdobs"],keys["upmag"]]
].values.T
ax.errorbar([Time(jd_, format="jd").datetime for jd_ in jdup],
upmag, yerr=0.15, lolims=True,alpha=0.3,
color=PROP[filter_.replace('"',"")]["mfc"],
ls="None",
label="_no_legend_")
ax.set_ylabel("magnitude", fontsize="large")
ax.set_xlabel("Time", fontsize="large")
if title is not None:
ax.set_title(title)
if show_legend:
ax.legend(loc=[1.02,0.], fontsize="medium" )
if savefile:
fig.savefile(savefile)
return {"ax":ax, "fig":fig}
# -------------- #
# Data I/O #
# -------------- #
# - What at ?
def target_spectra_directory(name):
""" where Marshal spectra are stored """
return os.path.join(MARSHALSOURCE,"spectra",name)
def target_lightcurves_directory(name):
""" where Marshal lightcurves are stored """
return os.path.join(MARSHALSOURCE,"lightcurves",name)
def target_source_directory(name):
""" where Marshal lightcurves are stored """
return os.path.join(MARSHALSOURCE,"source",name)
def target_alerts_directory(name):
""" where Marshal lightcurves are stored """
return os.path.join(MARSHALSOURCE,"alerts",name)
def get_program_filepath(program):
""" builds the program filepath
"""
return os.path.join(MARSHALSOURCE,f"{program}_target_sources.csv")
def program_datasource_filepath(program):
""" Where target sources are stored in your local files
Parameters
----------
program: [string/None list of]
Program you want to load.
Formats are:
- */None/All: all are loaded
- keyword: programs with the given keyword in the name are loaded, e.g. 'Cosmo'
- list of keywords: program with any of the keyword are loaded, e.g. ['Cosmo','Infant']
Returns
-------
list of filenames
"""
return {l.split("_")[0]:os.path.join(MARSHALSOURCE,l) for l in os.listdir(MARSHALSOURCE) if l.endswith("target_sources.csv") and
(program in ["*", None,"all", "All"] or np.any([program_ in l for program_ in np.atleast_1d(program)]))}
# - Get the FullPathes
def get_local_spectra(name, only_sedm=False, pysedm=True):
""" returns list of fullpath of spectra on your computer for the given target name.
Remark: These spectra have to be stored in the native `$ZTFDATA`/marshal/spectra/`name`
Parameters
----------
name: [str]
ZTF name (as in the Marshal)
only_sedm: [bool] -optional-
Do you want only the SEDM spectra ?
pysedm: [bool] -optional-
If only_sedm is True, do you want only the pysedm-based spectra ?
Returns
-------
dict # format: {filename:{data list}, ...}
"""
dir_ = target_spectra_directory(name)
if not os.path.isdir(dir_):
warnings.warn(f"No spectra for {name}")
return
all_files = {d: open( os.path.join(dir_,d) ).read().splitlines() for d in os.listdir( dir_ )
if (only_sedm and "P60" in d) or not only_sedm}
if not only_sedm or not pysedm:
return all_files
return {d:v for d,v in all_files.items() if np.any(["SOURCE" in l_ for l_ in v])}
def get_local_lightcurves(name, only_marshal=True, source=MARSHAL_LC_DEFAULT_SOUCE):
""" returns list of fullpath of lightcurves on your computer for the given target name.
Remark: These lightcurves have to be stored in the native `$ZTFDATA`/marshal/lightcurves/`name`
"""
dir_ = target_lightcurves_directory(name)
if not os.path.isdir(dir_):
warnings.warn(f"No lightcurve for {name}")
return
dataout = {d: pandas.read_csv(os.path.join(dir_,d)) for d in os.listdir(dir_)
if os.path.isfile( os.path.join(dir_,d) )}
if len(dataout) == 0:
return None
if only_marshal:
try:
return dataout["marshal_%s_lightcurve_%s.csv"%(source,name)]
except:
warnings.warn("No marshal lc with source %s identify for %s \n all source returned as a dict"%(source,name))
return dataout
def get_local_alerts(name):
""" """
filepath = os.path.join(target_alerts_directory(name), "marshal_alerts_%s.csv"%(name))
return pandas.read_csv(filepath)
# -------------- #
# Downloading #
# -------------- #
def download_spectra(name, dirout="default", auth=None, verbose=False, **kwargs):
"""Download all spectra for a source in the marshal as a tar.gz file
Parameters:
-----------
name: [str]
Name of a target on the marshal.
dirout: [str] -optional-
Directory where the data should be stored.
Additional options:
- `dirout=None`: The spectra are not saved but returned instead
- `dirout='default'`: The spectra will be saved in native target location
(`$ZTFDATA`/marshal/spectra/`name`)
Spectra saved here can be recovered using `get_local_spectra`
* This is favored *
auth: [str,str] -optional-
Marshal [username, password]
verbose: [bool] -optional-
Prints to know what is going on.
**kwargs goes to ztfquery.io.download_single_url()
Returns
-------
None (or list of data if `dirout=None`)
"""
# fileout is saved later to manage decompression
import tarfile
from io import BytesIO
response = io.download_single_url(MARSHAL_BASEURL+'batch_spec.cgi',
fileout=None,
data={"name":name},
auth=io._load_id_("marshal") if auth is None else auth,
cookies="no_cookies", show_progress=False,
**kwargs)
try:
tar = tarfile.open(fileobj=BytesIO( response.content ), mode='r')
except:
raise IOError("Cannot find a spectrum for %s"%name)
# No directory out? Then reformatted data is returned
if dirout is None or dirout in ["None"]:
if verbose: print("Data returned (dirout=None)")
out = {member.name:tar.extractfile(member).read().decode("utf-8").splitlines() for member in tar.getmembers()}
return out
# Directory given, then dump data there:
if dirout in ["default"]:
dirout = target_spectra_directory(name)
if verbose: print("Data will be stored here: %s"%dirout)
if not os.path.exists(dirout):
os.makedirs(dirout, exist_ok=True)
tar.extractall(dirout)
def download_source(name, dirout="default",
auth=None, verbose=False,
overwrite=False, return_data=False, **kwargs):
""" """
if dirout in ["None"]: dirout = None
if dirout in ["default"]: dirout = target_source_directory(name)
if dirout is not None:
fileout = f"marshal_{name}.csv"
fileout_full = os.path.join(dirout,fileout)
if os.path.isfile(fileout_full) and not overwrite:
warnings.warn(f"The source {fileout_full} already exists. Set overwrite to True to update it.")
return
response = io.download_single_url(MARSHAL_BASEURL+f"source_summary.cgi?sourceid={name}",
fileout=None,
auth=io._load_id_("marshal") if auth is None else auth,
cookies="no_cookies", show_progress=False,
**kwargs)
return response
def download_lightcurve(name, dirout="default",
auth=None, verbose=False,
source=MARSHAL_LC_DEFAULT_SOUCE,
overwrite=False, return_lc=False,
**kwargs):
"""Download all spectra for a source in the marshal as a tar.gz file
Parameters:
-----------
name: [str]
Name of a target on the marshal.
dirout: [str] -optional-
Directory where the data should be stored.
Additional options:
- `dirout=None`: The lightcurve is not saved but returned instead
- `dirout='default'`: The lightcurve will be saved in native target location
(`$ZTFDATA`/marshal/lightcurves/`name`)
lightcurve saved here can be recovered using `get_local_lightcurves`
* This is favored *
source: [str] -optional-
Source of the data in the marshal
- print_lc.cgi // basic
- plot_lc.cgi // contains slightly more information [default]
auth: [str,str] -optional-
Marshal [username, password]
overwrite: [bool] -optional-
Whether to re-download and overwrite the file if it already exists.
verbose: [bool] -optional-
Prints to know what is going on.
**kwargs goes to ztfquery.io.download_single_url()
Returns
-------
None (or pandas.DataFrame)
"""
# fileout is saved later to manage decompression
if source not in ["print_lc","plot_lc"]:
raise ValueError("source should be either 'print_lc' or 'plot_lc', '%s' given"%source)
if dirout in ["None"]: dirout = None
if dirout in ["default"]: dirout = target_lightcurves_directory(name)
if dirout is not None:
fileout = "marshal_%s_lightcurve_%s.csv"%(source, name)
fileout_full = os.path.join(dirout,fileout)
if os.path.isfile(fileout_full) and not overwrite:
warnings.warn("The lightcurve %s already exists. Set overwrite to True to update it."%(fileout_full))
return
response = io.download_single_url(MARSHAL_BASEURL+source+'.cgi',
fileout=None,
data={"name":name},
auth=io._load_id_("marshal") if auth is None else auth,
cookies="no_cookies", show_progress=False,
**kwargs)
# Convert the response into DataFrame | depending on the source
if source in ['plot_lc']:
table_start = [i for i,l in enumerate(response.text.splitlines()) if "table border=0 width=850" in l]
lctable_ = pandas.read_html("\n".join(response.text.splitlines()[table_start[0]:]))[0]
_ = lctable_.pop(0)
dataframe = pandas.DataFrame(lctable_[1:].values, columns=np.asarray(lctable_.iloc[0], dtype="str"))
else:
data = response.text.split("<table border=0 width=850>")[-1].replace(' ', '').replace('\n', '').split("<br>")
dataframe = pandas.DataFrame(data=[d.split(",")[:8] for d in data[1:] if len(d)>0], columns=data[0].split(",")[:8])
# returns it
if dirout is not None:
# Directory given, then dump data there:
if verbose: print("Data will be stored here: %s"%fileout_full)
if not os.path.exists(dirout):
os.makedirs(dirout, exist_ok=True)
dataframe.to_csv(fileout_full, index=False)
else:
return_lc=True
if return_lc:
return dataframe
def download_alerts(name, dirout="default",
auth=None, verbose=False,
overwrite=False, return_it=False,
**kwargs):
"""Download all spectra for a source in the marshal as a tar.gz file
Parameters:
-----------
name: [str]
Name of a target on the marshal.
dirout: [str] -optional-
Directory where the data should be stored.
Additional options:
- `dirout=None`: The alerts are not saved but returned instead
- `dirout='default'`: The lightcurve will be saved in native target location
(`$ZTFDATA`/marshal/alerts/`name`)
lightcurve saved here can be recovered using `get_local_alerts`
* This is favored *
auth: [str,str] -optional-
Marshal [username, password]
overwrite: [bool] -optional-
Whether to re-download and overwrite the file if it already exists.
verbose: [bool] -optional-
Prints to know what is going on.
**kwargs goes to ztfquery.io.download_single_url()
Returns
-------
None (or pandas.DataFrame)
"""
fileout = "marshal_alerts_%s.csv"%(name)
if dirout in ["None"]: dirout = None
if dirout in ["default"]: dirout = target_alerts_directory(name)
fileout_full = os.path.join(dirout,fileout)
if os.path.isfile(fileout_full) and not overwrite:
warnings.warn("The alert %s already exists. Set overwrite to True to update it."%(fileout))
return
response = io.download_single_url(MARSHAL_BASEURL+'view_avro.cgi',
fileout=None,
data={"name":name},
auth=io._load_id_("marshal") if auth is None else auth,
cookies="no_cookies", show_progress=False,
**kwargs)
dataframe = pandas.DataFrame([json.loads(l.split("</pre>")[0])
for l in response.text.split("<table>")[-1].replace(' ', '').replace('\n', '').split("<br><pre>")[1:]]
)
# returns it
if dirout is not None:
# Directory given, then dump data there:
if verbose: print("Alerts will be stored here: %s"%fileout_full)
if not os.path.exists(dirout):
os.makedirs(dirout, exist_ok=True)
dataframe.to_csv(fileout_full, index=False)
else:
return_it=True
if return_it:
return dataframe
def query_program_target(program, getredshift=True, getclassification=True, auth=None):
""" download target source information returns them as pandas.DataFrame
Parameters
----------
program: [int]
Program Number
getredshift, getclassification: [bool, bool] -optional-
If redshift and/or classification have been made in the marshal,
do you want them ?
auth: [str,str] -optional-
Marshal's [username, password]
CAUTION: if you are requesting program(s), make sure the `auth`
matches that of your loaded program if already loaded.
Returns
-------
pandas.DataFrame
"""
r = requests.post(MARSHAL_BASEURL+'list_program_sources.cgi',
auth=io._load_id_("marshal", askit=True) if auth is None else auth,
data={'programidx': program,
'getredshift': int(getredshift),
'getclassification': int(getclassification)})
return pandas.DataFrame.from_dict(json.loads(r.text))
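# Illustrative usage sketch (not part of the original module): pull the source list of a
# single marshal program. The program index and the credentials below are placeholders.
def _example_query_program(programidx=42):
    sources = query_program_target(programidx, getredshift=True, getclassification=True,
                                   auth=("my_username", "my_password"))
    return sources.head()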
#############################
# #
# Marshall Class #
# #
#############################
class MarshalAccess( object ):
""" Access the Marshal """
def __init__(self, load_programs=False, **kwargs):
"""
"""
if load_programs:
self.load_user_programs( **kwargs )
# -------------- #
# Main Methods #
# -------------- #
#
# I/O
#
def store(self):
""" Store the target_sources in the given file.
= By default files are stored as function of program ids (if any) =
Parameters
----------
Returns
-------
None
"""
for program in self.get_loaded_programs():
fileout = get_program_filepath(program)#[program]
if not os.path.isfile( fileout ):
dirout = "/".join(fileout.split("/")[:-1])
if not os.path.exists(dirout):
os.mkdir(dirout)
self.get_program_sources(program).to_csv(fileout, index=False)
@classmethod
def load_local(cls, program=None):
""" """
filepath = program_datasource_filepath(program)
return cls.load_datafile( filepath )
@classmethod
def load_datafile(cls, dataframefile, program=None):
""" """
# - Dict formating
if not type(dataframefile) is dict:
dataframefile = np.atleast_1d(dataframefile)
if program is None:
programs = [l.split("/")[-1].split("_target_sources")[0] for l in dataframefile]
else:
programs = np.atleast_1d(program)
if len(programs) != len(dataframefile):
raise ValueError("the program and dataframefile don't have the same size.")
dataframefile = {k:v for k,v in zip(programs, dataframefile)}
# - Let's go
programs = list(dataframefile.keys())
list_of_df = []
for p in programs:
file_ = dataframefile[p]
if not os.path.isfile(file_):
raise IOError(f"{file_} does not exists")
list_of_df.append(pandas.read_csv(file_))
this = cls()
this.set_target_sources(list_of_df, program=programs)
return this
#
# DOWNLOADER
#
@staticmethod
def download_spectra(name, dirout="default", auth=None, return_it=False, **kwargs):
"""
Method calling ztfquery.marshal.download_spectra()
Parameters:
-----------
name: [str or list of]
Name of a target on the marshal.
dirout: [str] -optional-
Directory where the data should be stored.
Additional options:
- `dirout=None`: The spectra are not saved but returned instead
- `dirout='default'`: The spectra will be saved in native target location
(`$ZTFDATA`/marshal/spectra/`name`)
Spectra saved here can be recovered using `get_local_spectra`
* This is favored *
auth: [str,str] -optional-
Marshal [username, password]
**kwargs goes to ztfquery.io.download_single_url()
Returns
-------
dict
// {name: `return_of ztfquery.marshal.download_spectra()`}
"""
out = {name_: download_spectra(name, dirout=dirout, auth=auth, **kwargs) for name_ in np.atleast_1d(name)}
if return_it:
print("NOTHING IMPLEMENTED FOR SPECTRA")
return
@staticmethod
def download_lightcurve(name, dirout="default", auth=None, return_it=False, **kwargs):
"""
Method calling ztfquery.marshal.download_lightcurve()
Parameters:
-----------
name: [str or list of]
Name of a target on the marshal.
dirout: [str] -optional-
Directory where the data should be stored.
Additional options:
- `dirout=None`: The lightcurve is not saved but returned instead
- `dirout='default'`: The lightcurve will be saved in native target location
(`$ZTFDATA`/marshal/lightcurves/`name`)
lightcurve saved here can be recovered using `get_local_lightcurves`
* This is favored *
auth: [str,str] -optional-
Marshal [username, password]
**kwargs goes to ztfquery.io.download_single_url()
Returns
-------
dict
// {name: `return_of ztfquery.marshal.download_lightcurve()`}
"""
out = {name_: download_lightcurve(name, dirout=dirout, auth=auth, return_lc=return_it, **kwargs) for name_ in np.atleast_1d(name)}
if return_it:
return out
@staticmethod
def download_alerts(name, dirout="default", auth=None, return_it=False, **kwargs):
"""
Method calling ztfquery.marshal.download_alerts()
Parameters:
-----------
name: [str or list of]
Name of a target on the marshal.
dirout: [str] -optional-
Directory where the data should be stored.
Additional options:
- `dirout=None`: The alerts are not saved but returned instead
- `dirout='default'`: The lightcurve will be saved in native target location
(`$ZTFDATA`/marshal/lightcurves/`name`)
lightcurve saved here can be recovered using `get_local_lightcurves`
* This is favored *
auth: [str,str] -optional-
Marshal [username, password]
**kwargs goes to ztfquery.io.download_single_url()
Returns
-------
dict
// {name: `return_of ztfquery.marshal.download_alerts()`}
"""
out = {name_: download_alerts(name, dirout=dirout, auth=auth,return_it=return_it, **kwargs) for name_ in np.atleast_1d(name)}
if return_it:
return out
#
# LOADER
#
def load_user_programs(self, auth=None):
""" """
if auth is None:
auth = io._load_id_("marshal", askit=True)
r = requests.post(MARSHAL_BASEURL+'list_programs.cgi', auth=auth)
r.raise_for_status() # raise a status if issue, like wrong auth
self.program_data = pandas.DataFrame.from_dict(json.loads(r.text))
def load_target_sources(self, program="*",
getredshift=True, getclassification=True,
setit=True, auth=None, store=True):
""" download target source information and store them as a
pandas.DataFrame as self.target_sources
(or returns it, see setit parameter)
Parameters
----------
program: [str or list of] --optional--
You want targets only associated to this program?
e.g. program="Redshift Completeness Factor"
program=["AMPEL Test","Redshift Completeness Factor"]
-> use program = None or program="*" for no program selection
getredshift, getclassification: [bool, bool] -optional-
If redshift and/or classification have been made in the marshal,
do you want them ?
setit: [bool] -optional-
Do you want to set downloaded data to self.target_sources (`setit=True`, default)
or would you prefer to directly get the pandas.DataFrame without
touching self.target_sources? (`setit=False`)
auth: [str,str] -optional-
Marshal's [username, password]
CAUTION: if you are requesting program(s), make sure the `auth`
matches that of your loaded program if already loaded.
Remark: If you did not load the user_program yet
(`self.load_user_programs()`), they are automatically matched.
Returns
-------
None (or pandas.DataFrame if setit=False, see above)
"""
# Cleaning Marshal's datainput
split_ = lambda x: [None, x] if not len(np.atleast_1d(x))==2 else x
program = np.atleast_1d(program)
df = {}
for i,programname_ in enumerate(program):
program_ = self._program_to_programidx_(programname_, auth=auth)[0]
df_ = query_program_target(program_,
getredshift=getredshift,
getclassification=getclassification,
auth=auth)
df_[["magband", "magval"]] = pandas.DataFrame(df_.mag.apply(split_).tolist(), index=df_.index)
_ = df_.pop("mag")
if getclassification:
df_["classification"] = df_["classification"].astype("str")
df[programname_] = df_
if setit:
self.set_target_sources( df, program=program)
if store:
self.store()
if not setit:
return df
#
# SETTER
#
def set_target_sources(self, target_source_dataframe, program="unknown"):
""" Provide a Pandas.DataFrame containing the target source information
as obtained by `load_target_sources`
"""
if type(target_source_dataframe) is pandas.DataFrame:
target_source_dataframe = [target_source_dataframe]
if type(target_source_dataframe) is dict:
self.target_sources = | pandas.concat(target_source_dataframe) | pandas.concat |
import json
import logging
from cmath import nan
import pandas as pd
from common.constants import Constants
from entity.Hotel import Hotel
class Recommender:
def __init__(self):
pass
def read_config(self, context):
self.item_item = pd.read_csv(
Constants.ITEM_SIMILAR_DATASETS_DIRECTORY + "item_item_similar_user_hotel_train_dataset" + context + ".csv",
index_col=0)
self.user_hotel = pd.read_csv(
Constants.USER_ITEM_DATASETS_NORMAL_DIRECTORY + "user_hotel_train_dataset" + context + ".csv",
index_col=0)
# self.user_user = pd.read_csv(
# Constants.USER_SIMILAR_DATASETS_DIRECTORY + "user_user_similar_user_hotel_train_dataset" + context + ".csv",
# index_col=0)
self.items = pd.read_csv(Constants.PRODUCT_DATASETS_DIRECTORY + "tblhotel.csv")
def get_top_ten_predict_for_user(self, userId, context):
self.read_config(context)
rating_by_user = self.user_hotel.loc[(self.user_hotel.index == userId)]
not_rating = []
items = self.user_hotel.columns.tolist()
for i in range(0, len(items)):
if rating_by_user.loc[userId, str(items[i])] == 0:
not_rating.append(items[i])
score_not_rating = []
for i in range(0, len(not_rating)):
itemId = int(not_rating[i])
name = self.get_name_hotel(itemId)
score = self.get_score_user_item(userId, itemId)
item = Hotel()
item.id = itemId
item.name = name
item.score = score
score_not_rating.append(item)
list_sorted = sorted(score_not_rating, key=lambda x: x.score, reverse=True)[:10]
list_hotel_json = []
for hotel in list_sorted:
list_hotel_json.append(json.dumps(hotel.__dict__))
return list_hotel_json
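    # calculate_similar() below implements a plain item-based CF score: the dot product of
    # the user's rating row with the item's similarity row, divided by the number of items
    # the user has actually rated (ratings of 0 are treated as "not rated"). A user with no
    # ratings triggers a ZeroDivisionError, which get_score_user_item() silently turns into 0.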
def calculate_similar(self, rating, itemsimilarity):
result = 0
count = 0
for i in range(0, len(rating)):
result += rating[i] * itemsimilarity[i]
if (rating[i] > 0):
count += 1
return result / count
def get_score_user_item(self, userId, itemId):
# print(self.user_item)
try:
ratings_item = self.user_hotel.loc[userId, :].tolist()
similar_item = self.item_item.loc[itemId, :].tolist()
value = self.calculate_similar(ratings_item, similar_item)
return value
except Exception as e1:
print(e1.__cause__)
return 0
def get_name_hotel(self, itemId):
hotel_matrix = | pd.DataFrame(self.items) | pandas.DataFrame |
import numpy as np
import pandas as pd
from joblib import Parallel, delayed
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score, mean_squared_error
from keras.models import Model, Sequential, load_model
from keras.layers import Input, Dense, Embedding, SpatialDropout1D, concatenate, Concatenate, SpatialDropout1D
from keras.layers import GRU, Bidirectional, GlobalAveragePooling1D, GlobalMaxPooling1D
from keras.layers import Dense, Input, LSTM, Embedding, Dropout, Activation, Conv1D, MaxPooling1D
from keras.layers import GlobalMaxPool1D, MaxPooling1D, MaxPooling2D, Conv2D, Add, Flatten
from keras.preprocessing import text, sequence
from keras.callbacks import Callback, EarlyStopping, ModelCheckpoint
from keras.optimizers import SGD
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.optimizers import Adam, RMSprop
from keras import initializers, regularizers, constraints, optimizers, layers
from keras import backend as K
from keras.engine import InputSpec, Layer
from tqdm import tqdm
import sys, os, re, csv, codecs, time, gc, argparse, logging, traceback
import dill
import cv2  # needed by resize_img() and get_image()
from utils import *
train_file_dir = '../input/train_jpg'
test_file_dir = '../input/test_jpg'
batch_size = 128
num_epoch = 100
patience = 2
image_x = 224
image_y = 224
im_dim = image_x      # used by resize_img(); assumed equal to the 224-px model input
n_channels = 3        # RGB channels, used by resize_img()
weight_path = "cnn.h5"
n_train = pd.read_csv("../input/train.csv").shape[0]
n_test = pd.read_csv("../input/test.csv").shape[0]
def get_id_chunks(ix_train, batch_size):
np.random.shuffle(ix_train)
length = int(len(ix_train) / batch_size) + 1
for i in range(length):
yield ix_train[i*batch_size:(i+1)*batch_size]
def resize_img(im, inter=cv2.INTER_AREA):
height, width, _ = im.shape
if height > width:
new_dim = (width*im_dim//height, im_dim)
else:
new_dim = (im_dim, height*im_dim//width)
imr = cv2.resize(im, new_dim, interpolation=inter)
h, w = imr.shape[:2]
off_x = (im_dim-w)//2
off_y = (im_dim-h)//2
im_out = np.zeros((im_dim, im_dim, n_channels), dtype=imr.dtype)
im_out[off_y:off_y+h, off_x:off_x+w] = imr
del imr
return im_out
def get_image(ids, train_or_test="train"):
if train_or_test=="train":
file_dir = train_file_dir
elif train_or_test=="test":
file_dir = test_file_dir
out = np.zeros((len(ids), image_x, image_y, 3))
for i in range(len(ids)):
try:
img = cv2.imread(os.path.join(file_dir, ids[i] + ".jpg"))
img = resize_img(img)
except:
img = np.zeros((image_x, image_y, 3))
out[i, :, :, :] = img
return out
def rmse(y_true, y_pred):
assert len(y_true) == len(y_pred)
return np.sqrt(np.mean(np.power((y_true - y_pred), 2)))
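# Keras-backend counterpart of rmse() above: it works on tensors, so it can be passed to
# model.compile() as the loss and metric (as done further below).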
def root_mean_squared_error(y_true, y_pred):
return K.sqrt(K.mean(K.square(y_true-y_pred)))
model = Sequential()
model.add(Conv2D(64, (3, 3), padding='same',
input_shape=(image_x, image_y, 3)))
model.add(Activation('relu'))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(64, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(1))  # num_classes was undefined; a single sigmoid output fits the RMSE-regression target
model.add(Activation('sigmoid'))
model.compile(loss=root_mean_squared_error,
optimizer="rmsprop",
metrics=[root_mean_squared_error])
image_ids = pd.read_csv("../input/train.csv",usecols=["image_id"]).image_id.values
image_ids_test = pd.read_csv("../input/test.csv",usecols=["image_id"]).image_id.values
# `y` is used below but was never defined; "deal_probability" is an assumed target column name.
y = pd.read_csv("../input/train.csv", usecols=["deal_probability"]).deal_probability.values
nsplits = 5
with open("../tmp/oof_index.dat", "rb") as f:
kfolds = dill.load(f)
val_score = []
result = np.zeros((n_test, 1))
cnt = 0
oof_valid = np.zeros((n_train, 1))
for ix_train, ix_valid in kfolds:
print("=======CV=======")
val_ids = image_ids[ix_valid]
X_val = get_image(val_ids, "train")
y_val = y[ix_valid]
bst_val_score = 1
early_stop = 0
for epoch in range(num_epoch):
print("======={}epoch=======".format(epoch))
for ixs in tqdm(get_id_chunks(ix_train, batch_size), total=len(ix_train)//batch_size + 1):
train_ids = image_ids[ixs]
X_train = get_image(train_ids, "train")
y_train = y[ixs]
model.fit(X_train
, y_train
, batch_size=batch_size
, epochs=1
, verbose=1
, shuffle=True
# , validation_data=(X_val, y_val)
)
val_score = rmse(y_val, model.predict(X_val))
if val_score < bst_val_score:
bst_val_score = val_score
model.save_weights(weight_path)
early_stop = 0
else:
early_stop += 1
if early_stop > patience:
print("Early Stopping!! Best Epoch {}".format(epoch))
model.load_weights(weight_path)
break
for i in range(int(len(image_ids_test)/batch_size)+1):
test_ids = image_ids_test[i*batch_size:(i+1)*batch_size]
X_test = get_image(test_ids, "test")
result[i*batch_size:(i+1)*batch_size] += model.predict(X_test) / nsplits
oof_valid[ix_valid, :] = model.predict(X_val)
K.clear_session()
gc.collect()
df_out = | pd.DataFrame(result, columns=["oof_cnn_image_feature"]) | pandas.DataFrame |
#!/usr/bin/env python3
import re
from pathlib import Path
from multiprocessing import Pool
import numpy as np
import pandas as pd
from astropy import units as u
from astropy.time import Time
from casatools import msmetadata
from tcal_poly import PATHS
STOKES = list("IQUV")
BANDS = list("LSCXUKAQ")
MAD_TO_STD = 1.4826
TEST_FSCALE_LINE = "# Flux density for J2355+4950 in SpW=0 (freq=3.2072e+10 Hz) is: 0.289629 +/- 0.0493206 (SNR = 5.87238, N = 42)"
def get_all_execution_dirs():
return sorted(PATHS.extern.glob("20??-??-*"))
def get_all_field_files():
return sorted(PATHS.extern.glob("20??-??-*/?/images/field_*.dat"))
def get_all_fscale_files():
return sorted(PATHS.extern.glob("20??-??-*/?/*.ms.fscale.dat"))
def mjs_to_date(mjs):
mjd = mjs * u.s.to("day")
time = Time(mjd, format="mjd")
return str(time.datetime.date())
def weighted_mean(vals, errs):
if len(errs) == 0:
return np.nan
else:
weights = 1 / errs**2
return np.nansum(vals * weights) / np.nansum(weights)
def weighted_mean_error(errs):
if len(errs) == 0:
return np.nan
else:
weights = 1 / errs**2
return np.sqrt(1 / np.nansum(weights))
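# Worked example of the inverse-variance weighting above (made-up numbers):
#   vals = [1.0, 2.0], errs = [0.1, 0.2]  ->  weights = [100, 25]
#   weighted_mean       = (1.0*100 + 2.0*25) / 125 = 1.2
#   weighted_mean_error = sqrt(1 / 125)            ~ 0.089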
def get_mjs_from_ms_path(path, field=None):
ms_path = list(path.parent.parent.glob("*.ms"))[0]
try:
tool = msmetadata()
tool.open(str(ms_path))
fields = tool.fieldnames()
if field is None:
scan_id = tool.scannumbers()[0]
else:
scan_id = tool.scansforfield(field)
time = tool.timesforscans(scan_id)
return time.min() # seconds
except RuntimeError:
raise
finally:
tool.done()
tool.close()
return times.mean()
def regularize_name(name):
if name == "3C138":
return "0521+166=3C138"
else:
return name
class DataFile:
columns = [
"freq",
"flux_I",
"flux_I_err",
"flux_I_peak",
"flux_I_peak_err",
"flux_Q",
"flux_Q_err",
"flux_Q_peak",
"flux_Q_peak_err",
"flux_U",
"flux_U_err",
"flux_U_peak",
"flux_U_peak_err",
"flux_V",
"flux_V_err",
"flux_V_peak",
"flux_V_peak_err",
]
stokes = ("I", "Q", "U", "V")
def __init__(self, filen):
filen = Path(filen)
assert filen.exists()
with filen.open("r") as f:
line = f.readline().split()
self.field = regularize_name(line[2].rstrip(";"))
self.band = line[4]
self.date = filen.parent.parent.parent.name
mjs = get_mjs_from_ms_path(filen, field=self.field)
df = pd.read_csv(filen, names=self.columns, header=0,
index_col=False, skiprows=2, sep=" ", na_values=0,
comment="#")
df.index.name = "spw"
df.reset_index(inplace=True)
df["field"] = self.field
df["date"] = self.date
df["band"] = self.band
df["mjs"] = mjs
df.set_index(["field", "date", "band", "spw"], inplace=True)
self.df = df
def aggregate_dynamic_seds(parallel=True, nproc=30):
field_files = get_all_field_files()
if parallel:
assert nproc > 0
with Pool(nproc) as pool:
data_files = pool.map(DataFile, field_files)
else:
data_files = [
DataFile(file_path)
for file_path in field_files
]
df = pd.concat([d.df for d in data_files])
return df
class FluxFile:
expression = (
R"# Flux density for (?P<field>.+?) "
R"in SpW=(?P<spw>\d+) "
R"\(freq=(?P<freq>\S+) Hz\) "
R"is: (?P<flux_I>\S+) \+/\- (?P<flux_I_err>\S+) "
R"\(SNR = (?P<snr>\S+?), N = (?P<nchan>\S+?)\)"
)
columns = "field,spw,freq,flux_I,flux_I_err,snr,nchan".split(",")
dtypes = (str, int, float, float, float, float, int)
def __init__(self, filen):
"""
Parameters
----------
filen : str
Full path to fluxscale data file, e.g. "*_A.ms.fscale.dat".
"""
filen = Path(filen)
assert filen.exists()
date = filen.parent.parent.name
p = re.compile(self.expression)
df = pd.DataFrame(columns=self.columns)
with filen.open("r") as f:
lines = f.readlines()
for i, line in enumerate(lines):
try:
group = p.match(line).groupdict()
for col in self.columns:
df.loc[i, col] = group[col]
except AttributeError:
continue
df = df.astype({c: d for c, d in zip(self.columns, self.dtypes)})
df["field"] = df.field.apply(regularize_name)
df["band"] = filen.parent.name
df["date"] = filen.parent.parent.name
df.set_index(["field", "date", "band", "spw"], inplace=True)
self.df = df
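def _demo_parse_fscale_line(line=TEST_FSCALE_LINE):
    """Illustrative self-check (not used by the pipeline): apply FluxFile's regex to the
    sample fluxscale line defined near the top of this module. For TEST_FSCALE_LINE this
    yields field='J2355+4950', spw='0', freq='3.2072e+10', flux_I='0.289629',
    flux_I_err='0.0493206', snr='5.87238', nchan='42'."""
    match = re.compile(FluxFile.expression).match(line)
    return None if match is None else match.groupdict()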
def aggregate_flux_files(parallel=True, nproc=30):
flux_files = get_all_fscale_files()
if parallel:
assert nproc > 0
with Pool(nproc) as pool:
flux_files = pool.map(FluxFile, flux_files)
else:
flux_files = [
FluxFile(file_path)
for file_path in flux_files
]
df = pd.concat([f.df for f in flux_files])
return df
def with_csv_ext(name):
if not name.endswith(".csv"):
return f"{name}.csv"
else:
return name
def read_df(filen="survey"):
filen = with_csv_ext(filen)
index_col = ["field", "date", "band", "spw"]
df = | pd.read_csv(PATHS.data/filen, index_col=index_col) | pandas.read_csv |
from datetime import (
datetime,
timedelta,
)
import numpy as np
import pytest
from pandas._libs.tslibs.ccalendar import (
DAYS,
MONTHS,
)
from pandas._libs.tslibs.period import INVALID_FREQ_ERR_MSG
from pandas.compat import is_platform_windows
from pandas import (
DatetimeIndex,
Index,
Series,
Timestamp,
date_range,
period_range,
)
import pandas._testing as tm
from pandas.core.tools.datetimes import to_datetime
import pandas.tseries.frequencies as frequencies
import pandas.tseries.offsets as offsets
@pytest.fixture(
params=[
(timedelta(1), "D"),
(timedelta(hours=1), "H"),
(timedelta(minutes=1), "T"),
(timedelta(seconds=1), "S"),
(np.timedelta64(1, "ns"), "N"),
(timedelta(microseconds=1), "U"),
(timedelta(microseconds=1000), "L"),
]
)
def base_delta_code_pair(request):
return request.param
freqs = (
[f"Q-{month}" for month in MONTHS]
+ [f"{annual}-{month}" for annual in ["A", "BA"] for month in MONTHS]
+ ["M", "BM", "BMS"]
+ [f"WOM-{count}{day}" for count in range(1, 5) for day in DAYS]
+ [f"W-{day}" for day in DAYS]
)
@pytest.mark.parametrize("freq", freqs)
@pytest.mark.parametrize("periods", [5, 7])
def test_infer_freq_range(periods, freq):
freq = freq.upper()
gen = date_range("1/1/2000", periods=periods, freq=freq)
index = DatetimeIndex(gen.values)
if not freq.startswith("Q-"):
assert frequencies.infer_freq(index) == gen.freqstr
else:
inf_freq = frequencies.infer_freq(index)
is_dec_range = inf_freq == "Q-DEC" and gen.freqstr in (
"Q",
"Q-DEC",
"Q-SEP",
"Q-JUN",
"Q-MAR",
)
is_nov_range = inf_freq == "Q-NOV" and gen.freqstr in (
"Q-NOV",
"Q-AUG",
"Q-MAY",
"Q-FEB",
)
is_oct_range = inf_freq == "Q-OCT" and gen.freqstr in (
"Q-OCT",
"Q-JUL",
"Q-APR",
"Q-JAN",
)
assert is_dec_range or is_nov_range or is_oct_range
def test_raise_if_period_index():
index = period_range(start="1/1/1990", periods=20, freq="M")
msg = "Check the `freq` attribute instead of using infer_freq"
with pytest.raises(TypeError, match=msg):
frequencies.infer_freq(index)
def test_raise_if_too_few():
index = DatetimeIndex(["12/31/1998", "1/3/1999"])
msg = "Need at least 3 dates to infer frequency"
with pytest.raises(ValueError, match=msg):
frequencies.infer_freq(index)
def test_business_daily():
index = DatetimeIndex(["01/01/1999", "1/4/1999", "1/5/1999"])
assert frequencies.infer_freq(index) == "B"
def test_business_daily_look_alike():
# see gh-16624
#
# Do not infer "B when "weekend" (2-day gap) in wrong place.
index = DatetimeIndex(["12/31/1998", "1/3/1999", "1/4/1999"])
assert frequencies.infer_freq(index) is None
def test_day_corner():
index = DatetimeIndex(["1/1/2000", "1/2/2000", "1/3/2000"])
assert frequencies.infer_freq(index) == "D"
def test_non_datetime_index():
dates = to_datetime(["1/1/2000", "1/2/2000", "1/3/2000"])
assert frequencies.infer_freq(dates) == "D"
def test_fifth_week_of_month_infer():
# see gh-9425
#
# Only attempt to infer up to WOM-4.
index = DatetimeIndex(["2014-03-31", "2014-06-30", "2015-03-30"])
assert frequencies.infer_freq(index) is None
def test_week_of_month_fake():
# All of these dates are on same day
# of week and are 4 or 5 weeks apart.
index = | DatetimeIndex(["2013-08-27", "2013-10-01", "2013-10-29", "2013-11-26"]) | pandas.DatetimeIndex |
import pandas as pd, numpy as np
import multiprocessing, os, warnings
import hpfrec.cython_loops as cython_loops
import ctypes, types, inspect
from scipy.sparse import coo_matrix, csr_matrix
pd.options.mode.chained_assignment = None
class HPF:
"""
Hierarchical Poisson Factorization
Model for recommending items based on probabilistic Poisson factorization
on sparse count data (e.g. number of times a user played different songs),
using mean-field variational inference with coordinate-ascent.
Can also use stochastic variational inference (using mini batches of data).
Can use different stopping criteria for the opimization procedure:
1) Run for a fixed number of iterations (stop_crit='maxiter').
2) Calculate the Poisson log-likelihood every N iterations (stop_crit='train-llk' and check_every)
and stop once {1 - curr/prev} is below a certain threshold (stop_thr)
3) Calculate the Poisson log-likelihood in a user-provided validation set (stop_crit='val-llk', val_set and check_every)
and stop once {1 - curr/prev} is below a certain threshold. For this criterion, you might want to lower the
default threshold (see Note).
4) Check the the difference in the user-factor matrix after every N iterations (stop_crit='diff-norm', check_every)
and stop once the *l2-norm* of this difference is below a certain threshold (stop_thr).
Note that this is **not a percent** difference as it is for log-likelihood criteria, so you should put a larger
value than the default here.
This is a much faster criterion to calculate and is recommended for larger datasets.
If passing reindex=True, it will internally reindex all user and item IDs. Your data will not require
reindexing if the IDs for users and items in counts_df meet the following criteria:
1) Are all integers.
2) Start at zero.
3) Don't have any enumeration gaps, i.e. if there is a user '4', user '3' must also be there.
If you only want to obtain the fitted parameters and use your own API later for recommendations,
you can pass produce_dicts=False and pass a folder where to save them in csv format (they are also
available as numpy arrays in this object's Theta and Beta attributes). Otherwise, the model
will create Python dictionaries with entries for each user and item, which can take quite a bit of
RAM memory. These can speed up predictions later through this package's API.
Passing verbose=True will also print RMSE (root mean squared error) at each iteration.
For slightly better speed pass verbose=False once you know what a good threshold should be
for your data.
Note
----
DataFrames and arrays passed to '.fit' might be modified inplace - if this is a problem you'll
need to pass a copy to them, e.g. 'counts_df=counts_df.copy()'.
Note
----
If 'check_every' is not None and stop_crit is not 'diff-norm', it will, every N iterations,
calculate the log-likelihood of the data. By default, this is NOT the full likelihood, (not including a constant
that depends on the data but not on the parameters and which is quite slow to compute). The reason why
it's calculated by default like this is because otherwise it can result it overflow (number is too big for the data
type), but be aware that if not adding this constant, the number can turn positive
and will mess with the stopping criterion for likelihood.
Note
----
If you pass a validation set, it will calculate the Poisson log-likelihood **of the non-zero observations
only**, rather than the complete likelihood that includes also the combinations of users and items
not present in the data (assumed to be zero), thus it's more likely that you might see positive numbers here.
Note
----
Compared to ALS, iterations from this algorithm are a lot faster to compute, so don't be scared about passing
large numbers for maxiter.
Note
----
In some unlucky cases, the parameters will become NA in the first iteration, in which case you should see
weird values for log-likelihood and RMSE. If this happens, try again with a different random seed.
Note
----
Fitting in mini-batches is more prone to numerical instability and compared to full-batch
variational inference, it is more likely that all your parameters will turn to NaNs (which
means the optimization procedure failed).
Parameters
----------
k : int
Number of latent factors to use.
a : float
Shape parameter for the user-factor matrix.
a_prime : float
Shape parameter and dividend of the rate parameter for the user activity vector.
b_prime : float
Divisor of the rate parameter for the user activity vector.
c : float
Shape parameter for the item-factor matrix.
c_prime : float
Shape parameter and dividend of the rate parameter for the item popularity vector.
d_prime : float
Divisor of the rate parameter for the item popularity vector.
ncores : int
Number of cores to use to parallelize computations.
If set to -1, will use the maximum available on the computer.
stop_crit : str, one of 'maxiter', 'train-llk', 'val-llk', 'diff-norm'
Stopping criterion for the optimization procedure.
check_every : None or int
Calculate log-likelihood every N iterations.
stop_thr : float
Threshold for proportion increase in log-likelihood or l2-norm for difference between matrices.
users_per_batch : None or int
Number of users to take for each batch update in stochastic variational inference. If passing None both here
and for 'items_per_batch', will perform full-batch variational inference, which leads to better results but on
larger datasets takes longer to converge.
If passing a number for both 'users_per_batch' and 'items_per_batch', it will alternate between epochs in which
it samples by user and epochs in which it samples by item - this leads to faster convergence and is recommended,
but using only one type leads to lower memory requirements and might have a use case if memory is limited.
items_per_batch : None or int
Number of items to take for each batch update in stochastic variational inference. If passing None both here
and for 'users_per_batch', will perform full-batch variational inference, which leads to better results but on
larger datasets takes longer to converge.
If passing a number for both 'users_per_batch' and 'items_per_batch', it will alternate between epochs in which
it samples by user and epochs in which it samples by item - this leads to faster convergence and is recommended,
but using only one type leads to lower memory requirements and might have a use case if memory is limited.
step_size : function(int) -> float in (0, 1)
Function that takes the iteration/epoch number as input (starting at zero) and produces the step size
for the global parameters as output (only used when fitting with stochastic variational inference).
The step size must be a number between zero and one, and should be decreasing with bigger iteration numbers.
Ignored when passing users_per_batch=None.
maxiter : int or None
Maximum number of iterations for which to run the optimization procedure. This corresponds to epochs when
fitting in batches of users. Recommended to use a lower number when passing a batch size.
reindex : bool
Whether to reindex data internally.
verbose : bool
Whether to print convergence messages.
random_seed : int or None
Random seed to use when starting the parameters.
allow_inconsistent_math : bool
Whether to allow inconsistent floating-point math (producing slightly different results on each run)
which would allow parallelization of the updates for the shape parameters of Lambda and Gamma.
Ignored (forced to True) in stochastic optimization mode.
full_llk : bool
Whether to calculate the full Poisson log-likelihood, including terms that don't depend on the model parameters
(thus are constant for a given dataset).
alloc_full_phi : bool
Whether to allocate the full Phi matrix (size n_samples * k) when using stochastic optimization. Doing so
will make it a bit faster, but it will use more memory.
Ignored when passing both 'users_per_batch=None' and 'items_per_batch=None'.
keep_data : bool
Whether to keep information about which user was associated with each item
in the training set, so as to exclude those items later when making Top-N
recommendations.
save_folder : str or None
Folder where to save all model parameters as csv files.
produce_dicts : bool
Whether to produce Python dictionaries for users and items, which
are used to speed-up the prediction API of this package. You can still predict without
them, but it might take some additional milliseconds (or more depending on the
number of users and items).
keep_all_objs : bool
Whether to keep intermediate objects/variables in the object that are not necessary for
predictions - these are: Gamma_shp, Gamma_rte, Lambda_shp, Lambda_rte, k_rte, t_rte
(when passing True here, the model object will have these extra attributes too).
Without these objects, it's not possible to call functions that alter the model parameters
given new information after it's already fit.
sum_exp_trick : bool
Whether to use the sum-exp trick when scaling the multinomial parameters - that is, calculating them as
exp(val - maxval)/sum_{val}(exp(val - maxval)) in order to avoid numerical overflow if there are
too large numbers. For this kind of model, it is unlikely that it will be required, and it adds a
small overhead, but if you notice NaNs in the results or in the likelihood, you might give this option a try.
Attributes
----------
Theta : array (nusers, k)
User-factor matrix.
Beta : array (nitems, k)
Item-factor matrix.
user_mapping_ : array (nusers,)
ID of the user (as passed to .fit) corresponding to each row of Theta.
item_mapping_ : array (nitems,)
ID of the item (as passed to .fit) corresponding to each row of Beta.
user_dict_ : dict (nusers)
Dictionary with the mapping between user IDs (as passed to .fit) and rows of Theta.
item_dict_ : dict (nitems)
Dictionary with the mapping between item IDs (as passed to .fit) and rows of Beta.
is_fitted : bool
Whether the model has been fit to some data.
niter : int
Number of iterations for which the fitting procedure was run.
train_llk : int
Final training likelihood calculated when the model was fit (only when passing 'verbose=True').
References
----------
[1] Scalable Recommendation with Hierarchical Poisson Factorization (<NAME>., <NAME>. and <NAME>., 2015)
[2] Stochastic variational inference (<NAME>., <NAME>., <NAME>. and <NAME>., 2013)
"""
def __init__(self, k=30, a=0.3, a_prime=0.3, b_prime=1.0,
c=0.3, c_prime=0.3, d_prime=1.0, ncores=-1,
stop_crit='train-llk', check_every=10, stop_thr=1e-3,
users_per_batch=None, items_per_batch=None, step_size=lambda x: 1/np.sqrt(x+2),
maxiter=100, reindex=True, verbose=True,
random_seed = None, allow_inconsistent_math=False, full_llk=False,
alloc_full_phi=False, keep_data=True, save_folder=None,
produce_dicts=True, keep_all_objs=True, sum_exp_trick=False):
## checking input
assert isinstance(k, int)
if isinstance(a, int):
a = float(a)
if isinstance(a_prime, int):
a_prime = float(a_prime)
if isinstance(b_prime, int):
b_prime = float(b_prime)
if isinstance(c, int):
c = float(c)
if isinstance(c_prime, int):
c_prime = float(c_prime)
if isinstance(d_prime, int):
d_prime = float(d_prime)
assert isinstance(a, float)
assert isinstance(a_prime, float)
assert isinstance(b_prime, float)
assert isinstance(c, float)
assert isinstance(c_prime, float)
assert isinstance(d_prime, float)
assert a>0
assert a_prime>0
assert b_prime>0
assert c>0
assert c_prime>0
assert d_prime>0
assert k>0
if ncores == -1:
ncores = multiprocessing.cpu_count()
if ncores is None:
ncores = 1
assert ncores>0
assert isinstance(ncores, int)
if random_seed is not None:
assert isinstance(random_seed, int)
assert stop_crit in ['maxiter', 'train-llk', 'val-llk', 'diff-norm']
if maxiter is not None:
assert maxiter>0
assert isinstance(maxiter, int)
else:
if stop_crit == 'maxiter':
raise ValueError("If 'stop_crit' is set to 'maxiter', must provide a maximum number of iterations.")
maxiter = 10**10
if check_every is not None:
assert isinstance(check_every, int)
assert check_every>0
assert check_every<=maxiter
else:
if stop_crit != 'maxiter':
raise ValueError("If 'stop_crit' is not 'maxiter', must input after how many iterations to calculate it.")
check_every = 0
if isinstance(stop_thr, int):
stop_thr = float(stop_thr)
if stop_thr is not None:
assert stop_thr>0
assert isinstance(stop_thr, float)
if save_folder is not None:
save_folder = os.path.expanduser(save_folder)
assert os.path.exists(save_folder)
verbose = bool(verbose)
if (stop_crit == 'maxiter') and (not verbose):
check_every = 0
if not isinstance(step_size, types.FunctionType):
raise ValueError("'step_size' must be a function.")
if len(inspect.getfullargspec(step_size).args) < 1:
raise ValueError("'step_size' must be able to take the iteration number as input.")
assert (step_size(0) >= 0) and (step_size(0) <= 1)
assert (step_size(1) >= 0) and (step_size(1) <= 1)
if users_per_batch is not None:
if isinstance(users_per_batch, float):
users_per_batch = int(users_per_batch)
assert isinstance(users_per_batch, int)
assert users_per_batch > 0
else:
users_per_batch = 0
if items_per_batch is not None:
if isinstance(items_per_batch, float):
items_per_batch = int(items_per_batch)
assert isinstance(items_per_batch, int)
assert items_per_batch > 0
else:
items_per_batch = 0
## storing these parameters
self.k = k
self.a = a
self.a_prime = a_prime
self.b_prime = b_prime
self.c = c
self.c_prime = c_prime
self.d_prime = d_prime
self.ncores = ncores
self.allow_inconsistent_math = bool(allow_inconsistent_math)
self.random_seed = random_seed
self.stop_crit = stop_crit
self.reindex = bool(reindex)
self.keep_data = bool(keep_data)
self.maxiter = maxiter
self.check_every = check_every
self.stop_thr = stop_thr
self.save_folder = save_folder
self.verbose = verbose
self.produce_dicts = bool(produce_dicts)
self.full_llk = bool(full_llk)
self.alloc_full_phi = bool(alloc_full_phi)
self.keep_all_objs = bool(keep_all_objs)
self.sum_exp_trick = bool(sum_exp_trick)
self.step_size = step_size
self.users_per_batch = users_per_batch
self.items_per_batch = items_per_batch
if not self.reindex:
self.produce_dicts = False
## initializing other attributes
self.Theta = None
self.Beta = None
self.user_mapping_ = None
self.item_mapping_ = None
self.user_dict_ = None
self.item_dict_ = None
self.is_fitted = False
self.niter = None
self.train_llk = None
def fit(self, counts_df, val_set=None):
"""
Fit Hierarchical Poisson Model to sparse count data
Fits a hierarchical Poisson model to count data using mean-field approximation with either
full-batch coordinate-ascent or mini-batch stochastic coordinate-ascent.
Note
----
DataFrames and arrays passed to '.fit' might be modified in-place - if this is a problem, you'll
need to pass a copy instead, e.g. 'counts_df=counts_df.copy()'.
Note
----
Forcibly terminating the procedure should still keep the last calculated shape and rate
parameter values, but is not recommended. If you need to make predictions on a forced-terminated
object, set the attribute 'is_fitted' to 'True'.
Note
----
Fitting in mini-batches is more prone to numerical instability and compared to full-batch
variational inference, it is more likely that all your parameters will turn to NaNs (which
means the optimization procedure failed).
Parameters
----------
counts_df : pandas data frame (nobs, 3) or coo_matrix
Input data with one row per non-zero observation, consisting of triplets ('UserId', 'ItemId', 'Count').
Must contain the columns 'UserId', 'ItemId', and 'Count'.
Combinations of users and items not present are implicitly assumed to be zero by the model.
Can also pass a sparse coo_matrix, in which case 'reindex' will be forced to 'False'.
val_set : pandas data frame (nobs, 3)
Validation set on which to monitor log-likelihood. Same format as counts_df.
Returns
-------
self : obj
Copy of this object
"""
## a basic check
if self.stop_crit == 'val-llk':
if val_set is None:
raise ValueError("If 'stop_crit' is set to 'val-llk', must provide a validation set.")
## running each sub-process
if self.verbose:
self._print_st_msg()
self._process_data(counts_df)
if self.verbose:
self._print_data_info()
if (val_set is not None) and (self.stop_crit!='diff-norm') and (self.stop_crit!='train-llk'):
self._process_valset(val_set)
else:
self.val_set = None
self._cast_before_fit()
self._fit()
## after terminating optimization
if self.keep_data:
if self.users_per_batch == 0:
self._store_metadata()
else:
self._st_ix_user = self._st_ix_user[:-1]
if self.produce_dicts and self.reindex:
self.user_dict_ = {self.user_mapping_[i]:i for i in range(self.user_mapping_.shape[0])}
self.item_dict_ = {self.item_mapping_[i]:i for i in range(self.item_mapping_.shape[0])}
self.is_fitted = True
del self.input_df
del self.val_set
return self
def _process_data(self, input_df):
calc_n = True
if isinstance(input_df, np.ndarray):
assert len(input_df.shape) > 1
assert input_df.shape[1] >= 3
input_df = pd.DataFrame(input_df[:, :3])
input_df.columns = ['UserId', 'ItemId', "Count"]
elif input_df.__class__.__name__ == 'DataFrame':
assert input_df.shape[0] > 0
assert 'UserId' in input_df.columns.values
assert 'ItemId' in input_df.columns.values
assert 'Count' in input_df.columns.values
self.input_df = input_df[['UserId', 'ItemId', 'Count']]
elif input_df.__class__.__name__ == 'coo_matrix':
self.nusers = input_df.shape[0]
self.nitems = input_df.shape[1]
input_df = pd.DataFrame({
'UserId' : input_df.row,
'ItemId' : input_df.col,
'Count' : input_df.data
})
self.reindex = False
calc_n = False
else:
raise ValueError("'input_df' must be a pandas data frame, numpy array, or scipy sparse coo_matrix.")
if self.stop_crit in ['maxiter', 'diff-norm']:
thr = 0
else:
thr = 0.9
obs_zero = input_df.Count.values <= thr
if obs_zero.sum() > 0:
msg = "'counts_df' contains observations with a count value less than 1, these will be ignored."
msg += " Any user or item associated exclusively with zero-value observations will be excluded."
msg += " If using 'reindex=False', make sure that your data still meets the necessary criteria."
msg += " If you still want to use these observations, set 'stop_crit' to 'diff-norm' or 'maxiter'."
warnings.warn(msg)
input_df = input_df.loc[~obs_zero]
if self.reindex:
self.input_df['UserId'], self.user_mapping_ = pd.factorize(self.input_df.UserId)
self.input_df['ItemId'], self.item_mapping_ = pd.factorize(self.input_df.ItemId)
self.nusers = self.user_mapping_.shape[0]
self.nitems = self.item_mapping_.shape[0]
self.user_mapping_ = np.array(self.user_mapping_).reshape(-1)
self.item_mapping_ = np.array(self.item_mapping_).reshape(-1)
if (self.save_folder is not None) and self.reindex:
if self.verbose:
print("\nSaving user and item mappings...\n")
pd.Series(self.user_mapping_).to_csv(os.path.join(self.save_folder, 'users.csv'), index=False)
pd.Series(self.item_mapping_).to_csv(os.path.join(self.save_folder, 'items.csv'), index=False)
else:
if calc_n:
self.nusers = self.input_df.UserId.max() + 1
self.nitems = self.input_df.ItemId.max() + 1
if self.save_folder is not None:
with open(os.path.join(self.save_folder, "hyperparameters.txt"), "w") as pf:
pf.write("a: %.3f\n" % self.a)
pf.write("a_prime: %.3f\n" % self.a_prime)
pf.write("b_prime: %.3f\n" % self.b_prime)
pf.write("c: %.3f\n" % self.c)
pf.write("c_prime: %.3f\n" % self.c_prime)
pf.write("d_prime: %.3f\n" % self.d_prime)
pf.write("k: %d\n" % self.k)
if self.random_seed is not None:
pf.write("random seed: %d\n" % self.random_seed)
else:
pf.write("random seed: None\n")
if self.input_df['Count'].dtype != ctypes.c_float:
self.input_df['Count'] = self.input_df.Count.astype(ctypes.c_float)
if self.input_df['UserId'].dtype != cython_loops.obj_ind_type:
self.input_df['UserId'] = self.input_df.UserId.astype(cython_loops.obj_ind_type)
if self.input_df['ItemId'].dtype != cython_loops.obj_ind_type:
self.input_df['ItemId'] = self.input_df.ItemId.astype(cython_loops.obj_ind_type)
if self.users_per_batch != 0:
if self.nusers < self.users_per_batch:
warnings.warn("Batch size passed is larger than number of users. Will set it to nusers/10.")
self.users_per_batch = int(np.ceil(self.nusers / 10))
self.input_df.sort_values('UserId', inplace=True)
self._store_metadata(for_partial_fit=True)
return None
def _process_valset(self, val_set, valset=True):
if isinstance(val_set, np.ndarray):
assert len(val_set.shape) > 1
assert val_set.shape[1] >= 3
val_set = pd.DataFrame(val_set[:, :3])
val_set.columns = ['UserId', 'ItemId', "Count"]
elif val_set.__class__.__name__ == 'DataFrame':
assert val_set.shape[0] > 0
assert 'UserId' in val_set.columns.values
assert 'ItemId' in val_set.columns.values
assert 'Count' in val_set.columns.values
self.val_set = val_set[['UserId', 'ItemId', 'Count']]
elif val_set.__class__.__name__ == 'coo_matrix':
assert val_set.shape[0] <= self.nusers
assert val_set.shape[1] <= self.nitems
val_set = pd.DataFrame({
'UserId' : val_set.row,
'ItemId' : val_set.col,
'Count' : val_set.data
})
else:
raise ValueError("'val_set' must be a pandas data frame, numpy array, or sparse coo_matrix.")
if self.stop_crit == 'val-llk':
thr = 0
else:
thr = 0.9
obs_zero = self.val_set.Count.values <= thr
if obs_zero.sum() > 0:
msg = "'val_set' contains observations with a count value less than 1, these will be ignored."
warnings.warn(msg)
self.val_set = self.val_set.loc[~obs_zero]
if self.reindex:
self.val_set['UserId'] = pd.Categorical(self.val_set.UserId, self.user_mapping_).codes
self.val_set['ItemId'] = pd.Categorical(self.val_set.ItemId, self.item_mapping_).codes
import os
import sqlalchemy
#library for handle exception
from sqlalchemy.exc import InterfaceError, OperationalError, ProgrammingError
import pandas as pd
from dotenv import load_dotenv
#load .env file
load_dotenv()
#Credential DB Demo
hostdbdemo = os.getenv('hostdbdemo')
portdbdemo = os.getenv('portdbdemo')
userdbdemo = os.getenv('userdbdemo')
passdbdemo = os.getenv('<PASSWORD>dbdemo')
dbdemoname = os.getenv('dbdemoname')
#Credential DB DWH
hostdbdwh = os.getenv('hostdbdwh')
portdbdwh = os.getenv('portdbdwh')
userdbdwh = os.getenv('userdbdwh')
passdbdwh = os.getenv('passdbdwh')
dbdwhname = os.getenv('dbdwhname')
#Connection string for DB Demo
connection_uri = "mariadb+mariadbconnector://"+userdbdemo+":"+passdbdemo+"@"+hostdbdemo+":"+portdbdemo+"/"+dbdemoname
#Connection string for DB DWH
connection_dbdwh = "mariadb+mariadbconnector://"+userdbdwh+":"+passdbdwh+"@"+hostdbdwh+":"+portdbdwh+"/"+dbdwhname
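#The concatenations above produce SQLAlchemy URIs of the form (hypothetical credentials shown):
#  mariadb+mariadbconnector://etl_user:secret@localhost:3306/demo_db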
# ETL teomjobgrade & tdimjobgrade
def etl_teomjobgrade():
try:
#open cursor to dbdemo
dbdemo_engine = sqlalchemy.create_engine(connection_uri)
#Test Connection dbdemo
dbdemo_engine.connect()
#query Text teomjobgrade
sqltext = "select grade_code,gradecategory_code,grade_name from teomjobgrade"
#execute sql
esql = pd.read_sql(sqltext,dbdemo_engine)
#set data to dataframe
df_esql = pd.DataFrame(esql)
"""<NAME>., 2019 - 2020. All rights reserved.
This file process the IO for the Text similarity """
import math
import os
import datetime
import shutil
import time
import pandas as pd
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity
import similarity.similarity_logging as cl
LOG = cl.get_logger()
def is_nan(value):
""" Function which identifies the "nan" on empty cells """
try:
return math.isnan(float(value))
except ValueError:
return False
class SimilarityIO:
""" This class is used for IO Processing the text similarity tool.
User input file is fetched here, also intermediate file as well as
the final recommendation creating are tasks for this class """
def __init__(self, file_path, uniq_id, col_int, filter_range="60,100", num_html_row=100, is_new_text=False,
new_text=None, report_row_filter=500000):
""" Constructor for Similarity input output processor, which initializes the the input variables needed IO
processing """
LOG.info("\nSimilarity_UI \nValues passed:\n") # pragma: no mutate
self.file_path = file_path
LOG.info("Path:%s", str(self.file_path)) # pragma: no mutate
self.uniq_id = uniq_id
LOG.info("\nUnique ID Column:%s", str(self.uniq_id)) # pragma: no mutate
self.col_int = col_int
LOG.info("\nColumns of Interest:%s", str(self.col_int)) # pragma: no mutate
self.filter_range = str(filter_range)
LOG.info("\nfilter_range value:%s", str(self.filter_range)) # pragma: no mutate
self.num_html_row = num_html_row
LOG.info("\nnumber of html row:%s", str(self.num_html_row)) # pragma: no mutate
self.report_row_filter = report_row_filter
LOG.info("\nnumber of html row split filter:%s", str(self.report_row_filter)) # pragma: no mutate
self.is_new_text = is_new_text
self.new_text = new_text
LOG.info("\nNew_text:%s", str(self.new_text)) # pragma: no mutate
self.data_frame = None
self.uniq_header = None
def __get_file_path(self):
""" Function used for getting the file path where the results can be stored /
from where input is provided"""
if os.path.isfile(self.file_path):
return str(os.path.dirname(self.file_path))
return self.file_path
def __get_file_name(self):
""" Function used for getting the input file name which can be further used for naming
the result """
if os.path.isfile(self.file_path):
file_path = self.file_path.split("/")
return os.path.splitext(file_path[-1])[0]
return "similarity"
def __get_header(self):
""" Function to fetch the header from the input file read in the dataframe """
return list(self.data_frame.columns.values)
def __set_uniq_header(self):
""" Function to fetch the unique ID header """
sheet_headers = self.__get_header()
self.uniq_header = sheet_headers[int(self.uniq_id)]
def __get_duplicate_id(self):
""" Function which identifies if any duplicate ID present in the input file """
unique_id_frame = pd.Series(self.data_frame[self.uniq_header]).fillna(method='ffill')
unique_id_frame = unique_id_frame.mask((unique_id_frame.shift(1) == unique_id_frame))
__duplicated_list = list(unique_id_frame.duplicated())
__du_list = []
# Remove the 'NaN' in case of empty cell and filter only IDs
for key, item in enumerate(__duplicated_list):
if item:
__du_list.append(unique_id_frame[key])
du_list = list(map(lambda x: 0 if is_nan(x) else x, __du_list))
__data = {"Duplicate ID": [nonzero for nonzero in du_list if nonzero != 0]}
# Create DataFrame and write
self.__write_xlsx(pd.DataFrame(__data), "Duplicate_ID")
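# Hedged note on the logic above: forward-filling handles empty/merged ID cells, the shift+mask
# step blanks consecutive repeats of the same ID to NaN so they are later filtered out, and only
# the remaining repeated IDs end up in the "Duplicate_ID" sheet.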
def __get_ip_file_type(self):
""" Function to return the file extension type"""
file_type = self.file_path.split(".")[-1]
return file_type.upper()
def __read_to_panda_df(self):
""" Function which read the input data/xlsx to a pandas Data frame """
if not os.path.exists(self.file_path):
LOG.error("\nFile path is invalid") # pragma: no mutate
return False
function_dict = {
"XLSX": lambda x: | pd.read_excel(self.file_path) | pandas.read_excel |
# *- coding: utf-8 -*
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.ticker import AutoMinorLocator, MultipleLocator
# from model.ESPNet_v2.SegmentationModel import EESPNet_Seg
# from model.CGNet import CGNet
# from model.ContextNet import ContextNet
# from model.DABNet import DABNet
# from model.EDANet import EDANet
# from model.ENet import ENet
# from model.ERFNet import ERFNet
# from model.ESNet import ESNet
# from model.ESPNet import ESPNet
# from model.FastSCNN import FastSCNN
# from model.FPENet import FPENet
# from model.FSSNet import FSSNet
# from model.LEDNet import LEDNet
# from model.LinkNet import LinkNet
# from model.SegNet import SegNet
# from model.SQNet import SQNet
# from model.UNet import UNet
pd.set_option('display.width', 1000)
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
def analysisG(): # compare the original networks with the CONV6*6 (G) variants
#
# https://www.cnblogs.com/happymeng/p/10481293.html
dataset = 'camvid352'
module_names = ['DABNet', 'ESNet', 'FastSCNN', 'LEDNet', 'FPENet', 'DF1Seg']
train_types = ['bs2gpu1_train']
losses = pd.DataFrame()
mious = pd.DataFrame()
for n in module_names:
for t in train_types:
try:
df1 = pd.read_table("./checkpoint/" + dataset + "/" + n + t + "/log.txt", sep='\t\t', skiprows=2,
header=None,
names=['Epoch', 'Loss(Tr)', 'mIOU(val)', 'lr'], engine='python')
df2 = pd.read_table("./checkpoint/" + dataset + "/" + n + 'G' + t + "/log.txt", sep='\t\t', skiprows=2,
header=None,
names=['Epoch', 'Loss(Tr)', 'mIOU(val)', 'lr'], engine='python')
# df_loss = df1.loc[:,['Epoch']]
df_loss = df1[['Epoch']].copy()
df_loss[n] = df1['Loss(Tr)']
df_loss[n + 'G'] = df2['Loss(Tr)']
df_loss.plot(x='Epoch')
plt.savefig("./checkpoint/" + dataset + "/" + n + t + "_loss_vs_epochs.png")
plt.clf()
df_miou = df1[['Epoch']].copy()
df_miou[n] = df1['mIOU(val)']
df_miou[n + 'G'] = df2['mIOU(val)']
df_miou = df_miou.dropna(axis=0, how='any')
df_miou.plot(x='Epoch')
plt.savefig("./checkpoint/" + dataset + "/" + n + t + "_iou_vs_epochs.png")
plt.clf()
if len(losses.index) == 0:
losses = df_loss.copy()
mious = df_miou.copy()
else:
losses = pd.merge(losses, df_loss)
mious = pd.merge(mious, df_miou)
except:
pass
losses[500:].plot(x='Epoch')
plt.savefig("./checkpoint/" + dataset + "/" + t + "all_loss_vs_epochs.png")
plt.clf()
mious[4:].plot(x='Epoch')
plt.savefig("./checkpoint/" + dataset + "/" + t + "_all_iou_vs_epochs.png")
plt.close('all')
def fastscnn(): # compare FastSCNN with its G variants
dataset = 'FastSCNN/FastSCNN-4'
module_names = ['FastSCNN', 'FastSCNNG1', 'FastSCNNG3', 'FastSCNNG6']
train_types = ['bs4gpu1_train']
losses = pd.DataFrame()
mious = pd.DataFrame()
for t in train_types:
for n in module_names:
df = pd.read_table("./checkpoint/" + dataset + "/" + n + t + "/log.txt", sep='\t\t', skiprows=2,
header=None,
names=['Epoch', 'Loss(Tr)', 'mIOU(val)', 'lr'], engine='python')
if len(losses.index) == 0:
losses = df[['Epoch']].copy()
mious = df[['Epoch']].copy()
losses[n] = df['Loss(Tr)']
mious[n] = df['mIOU(val)']
else:
losses[n] = df['Loss(Tr)']
mious[n] = df['mIOU(val)']
losses.plot(x='Epoch')
plt.savefig("./checkpoint/" + dataset + "/" + 'FastSCNN1234' + train_types[0] + "_loss_vs_epochs.png")
plt.clf()
mious = mious.dropna(axis=0, how='any')
mious.plot(x='Epoch')
plt.savefig("./checkpoint/" + dataset + "/" + 'FastSCNN1234' + train_types[0] + "_iou_vs_epochs.png")
plt.clf()
# plt.close('all')
print(mious)
def fastscnn_mean(): # compare FastSCNN variants, averaged over repeated runs
datasets = ['FastSCNN/FastSCNN-{}'.format(i) for i in range(1, 11)]
module_names = ['FastSCNN', 'FastSCNNG1', 'FastSCNNG3', 'FastSCNNG6', 'FastSCNNG7', 'FastSCNNG8']
train_types = ['bs4gpu1_train']
losses_mean = pd.DataFrame()
mious_mean = pd.DataFrame()
t = train_types[0]
for n in module_names:
losses = pd.DataFrame()
mious = pd.DataFrame()
for d in datasets:
df = pd.read_table("./checkpoint/" + d + "/" + n + t + "/log.txt", sep='\t\t', skiprows=2,
header=None,
names=['Epoch', 'Loss(Tr)', 'mIOU(val)', 'lr'], engine='python')
if len(losses.index) == 0:
losses = df[['Epoch']].copy()
mious = df[['Epoch']].copy()
losses[d + '/' + n] = df['Loss(Tr)']
mious[d + '/' + n] = df['mIOU(val)']
else:
losses[d + '/' + n] = df['Loss(Tr)']
mious[d + '/' + n] = df['mIOU(val)']
tmp = losses.drop(['Epoch'], axis=1)
losses[n + '_avg'] = tmp.mean(axis=1)
losses.plot(x='Epoch')
plt.savefig("./checkpoint/" + 'FastSCNN' + "/" + n + t + "_loss_vs_epochs.png")
plt.clf()
mious = mious.dropna(axis=0, how='any')
tmp = mious.drop(['Epoch'], axis=1)
mious[n + '_avg'] = tmp.mean(axis=1)
mious_mean[n + '_avg'] = mious[n + '_avg']
mious.plot(x='Epoch')
plt.savefig("./checkpoint/" + 'FastSCNN' + "/" + n + t + "_iou_vs_epochs.png")
plt.clf()
# plt.close('all')
print(mious)
print(mious_mean)
mious_mean.to_csv('FastSCNNG_mious_mean.csv')
mious_mean.plot()
plt.savefig("./checkpoint/" + 'FastSCNN' + "/" + "FastSCNN" + t + "_avg_iou_vs_epochs.png")
plt.clf()
def lednet_mean(): # compare LEDNet variants, averaged over repeated runs
datasets = ['LEDNet/LEDNet-{}'.format(i) for i in range(1, 2)]
module_names = ['LEDNet', 'LEDNetG1', 'LEDNetG2', 'LEDNetG3']
train_types = ['bs4gpu1_train']
losses_mean = pd.DataFrame()
mious_mean = pd.DataFrame()
t = train_types[0]
for n in module_names:
losses = pd.DataFrame()
mious = pd.DataFrame()
for d in datasets:
df = pd.read_table("./checkpoint/" + d + "/" + n + t + "/log.txt", sep='\t\t', skiprows=2,
header=None,
names=['Epoch', 'Loss(Tr)', 'mIOU(val)', 'lr'], engine='python')
if len(losses.index) == 0:
losses = df[['Epoch']].copy()
mious = df[['Epoch']].copy()
losses[d + '/' + n] = df['Loss(Tr)']
mious[d + '/' + n] = df['mIOU(val)']
else:
losses[d + '/' + n] = df['Loss(Tr)']
mious[d + '/' + n] = df['mIOU(val)']
tmp = losses.drop(['Epoch'], axis=1)
losses[n + '_avg'] = tmp.mean(axis=1)
losses.plot(x='Epoch')
plt.savefig("./checkpoint/" + 'LEDNet' + "/" + n + t + "_loss_vs_epochs.png")
plt.clf()
mious = mious.dropna(axis=0, how='any')
tmp = mious.drop(['Epoch'], axis=1)
mious[n + '_avg'] = tmp.mean(axis=1)
mious_mean[n + '_avg'] = mious[n + '_avg']
mious.plot(x='Epoch')
plt.savefig("./checkpoint/" + 'LEDNet' + "/" + n + t + "_iou_vs_epochs.png")
plt.clf()
# plt.close('all')
print(mious)
print(mious_mean)
mious_mean.plot(figsize=(20, 15))
plt.savefig("./checkpoint/" + 'LEDNet' + "/" + "LEDNet" + t + "_avg_iou_vs_epochs.png")
plt.clf()
def fpenet_mean(): # compare FPENet variants, averaged over repeated runs
datasets = ['FPENet/FPENet-{}'.format(i) for i in range(1, 2)]
module_names = ['FPENet', 'FPENetG0', 'FPENetG1', 'FPENetG2']
train_types = ['bs4gpu1_train']
losses_mean = pd.DataFrame()
mious_mean = pd.DataFrame()
t = train_types[0]
for n in module_names:
losses = pd.DataFrame()
mious = pd.DataFrame()
for d in datasets:
df = pd.read_table("./checkpoint/" + d + "/" + n + t + "/log.txt", sep='\t\t', skiprows=2,
header=None,
names=['Epoch', 'Loss(Tr)', 'mIOU(val)', 'lr'], engine='python')
if len(losses.index) == 0:
losses = df[['Epoch']].copy()
mious = df[['Epoch']].copy()
losses[d + '/' + n] = df['Loss(Tr)']
mious[d + '/' + n] = df['mIOU(val)']
else:
losses[d + '/' + n] = df['Loss(Tr)']
mious[d + '/' + n] = df['mIOU(val)']
tmp = losses.drop(['Epoch'], axis=1)
losses[n + '_avg'] = tmp.mean(axis=1)
losses.plot(x='Epoch')
plt.savefig("./checkpoint/" + 'FPENet' + "/" + n + t + "_loss_vs_epochs.png")
plt.clf()
mious = mious.dropna(axis=0, how='any')
tmp = mious.drop(['Epoch'], axis=1)
mious[n + '_avg'] = tmp.mean(axis=1)
mious_mean[n + '_avg'] = mious[n + '_avg']
mious.plot(x='Epoch')
plt.savefig("./checkpoint/" + 'FPENet' + "/" + n + t + "_iou_vs_epochs.png")
plt.clf()
# plt.close('all')
print(mious)
print(mious_mean)
mious_mean.plot(figsize=(20, 15))
plt.savefig("./checkpoint/" + 'FPENet' + "/" + "FPENet" + t + "_avg_iou_vs_epochs.png")
plt.clf()
def LEDNet_19_a(): #
dataset = 'camvid352'
module_names = ['LEDNet', 'LEDNet_19']
train_types = ['bs4gpu1_train']
losses = pd.DataFrame()
mious = pd.DataFrame()
for t in train_types:
for n in module_names:
df = pd.read_table("./checkpoint/" + dataset + "/" + n + t + "/log.txt", sep='\t\t', skiprows=2,
header=None,
names=['Epoch', 'Loss(Tr)', 'mIOU(val)', 'lr'], engine='python')
if len(losses.index) == 0:
losses = df[['Epoch']].copy()
mious = df[['Epoch']].copy()
losses[n] = df['Loss(Tr)']
mious[n] = df['mIOU(val)']
else:
losses[n] = df['Loss(Tr)']
mious[n] = df['mIOU(val)']
losses.plot(x='Epoch')
plt.savefig("./checkpoint/" + dataset + "/" + 'LEDNet_19' + train_types[0] + "_loss_vs_epochs.png")
plt.clf()
mious = mious.dropna(axis=0, how='any')
mious.plot(x='Epoch')
plt.savefig("./checkpoint/" + dataset + "/" + 'LEDNet_19' + train_types[0] + "_iou_vs_epochs.png")
plt.clf()
def ESNet_bi_a(): #
dataset = 'camvid352'
module_names = ['ESNet', 'ESNet_bi']
train_types = ['bs4gpu1_train']
losses = pd.DataFrame()
mious = pd.DataFrame()
for t in train_types:
for n in module_names:
df = pd.read_table("./checkpoint/" + dataset + "/" + n + t + "/log.txt", sep='\t\t', skiprows=2,
header=None,
names=['Epoch', 'Loss(Tr)', 'mIOU(val)', 'lr'], engine='python')
if len(losses.index) == 0:
losses = df[['Epoch']].copy()
mious = df[['Epoch']].copy()
losses[n] = df['Loss(Tr)']
mious[n] = df['mIOU(val)']
else:
losses[n] = df['Loss(Tr)']
mious[n] = df['mIOU(val)']
losses.plot(x='Epoch')
plt.savefig("./checkpoint/" + dataset + "/" + 'ESNet_bi' + train_types[0] + "_loss_vs_epochs.png")
plt.clf()
mious = mious.dropna(axis=0, how='any')
mious.plot(x='Epoch')
plt.savefig("./checkpoint/" + dataset + "/" + 'ESNet_bi' + train_types[0] + "_iou_vs_epochs.png")
plt.clf()
print(mious)
def all_net(): # compare all networks on camvid352
dataset = 'camvid352'
module_names = ['CGNet', 'ContextNet', 'DABNet', 'DF1Seg', 'EDANet', 'ENet', 'ERFNet', 'ESNet', 'ESPNet',
'FPENet', 'FSSNet', 'LEDNet', 'LinkNet', 'SegNet', 'SQNet', 'UNet', 'ESPNet_v2']
train_types = ['bs4gpu1_train']
losses = pd.DataFrame()
mious = pd.DataFrame()
for t in train_types:
for n in module_names:
df = pd.read_table("./checkpoint/" + dataset + "/" + n + t + "/log.txt", sep='\t\t', skiprows=2,
header=None,
names=['Epoch', 'Loss(Tr)', 'mIOU(val)', 'lr'], engine='python')
if len(losses.index) == 0:
losses = df[['Epoch']].copy()
mious = df[['Epoch']].copy()
losses[n] = df['Loss(Tr)']
mious[n] = df['mIOU(val)']
else:
losses[n] = df['Loss(Tr)']
mious[n] = df['mIOU(val)']
losses.plot(x='Epoch', figsize=(16, 8))
plt.savefig("./checkpoint/" + dataset + "/" + 'ALL_' + train_types[0] + "_loss_vs_epochs.png")
plt.clf()
mious = mious.dropna(axis=0, how='any')
mious.plot(x='Epoch', figsize=(20, 15))
plt.savefig("./checkpoint/" + dataset + "/" + 'ALL_' + train_types[0] + "_iou_vs_epochs.png")
plt.clf()
s = mious.sort_values(by=999, axis=1)
# plt.close('all')
print(s)
def networks_mean(module_names, name='FastSCNN', version='1', train_types='bs4gpu1_train'): # compare network variants, averaged over repeated runs
datasets = [name + version + '/' + name + '-{}'.format(i) for i in range(1, 11)]
# module_names = ['FastSCNN','FastSCNNG1', 'FastSCNNG3', 'FastSCNNG6','FastSCNNG7','FastSCNNG8']
# losses_mean = pd.DataFrame()
mious_mean = pd.DataFrame()
t = train_types
for n in module_names:
losses = pd.DataFrame()
mious = pd.DataFrame()
for d in datasets:
try:
df = pd.read_table("./checkpoint/" + d + "/" + n + t + "/log.txt", sep='\t\t', skiprows=2,
header=None,
names=['Epoch', 'Loss(Tr)', 'mIOU(val)', 'lr'], engine='python')
if len(losses.index) == 0:
losses = df[['Epoch']].copy()
mious = df[['Epoch']].copy()
losses[d + '/' + n] = df['Loss(Tr)']
mious[d + '/' + n] = df['mIOU(val)']
else:
losses[d + '/' + n] = df['Loss(Tr)']
mious[d + '/' + n] = df['mIOU(val)']
except:
print(d + '/' + n + ' is training')
tmp = losses.drop(['Epoch'], axis=1)
losses[n + '_avg'] = tmp.mean(axis=1)
losses.plot(x='Epoch')
plt.savefig("./checkpoint/" + name + version + "/" + n + t + "_loss_vs_epochs.png")
plt.clf()
mious = mious.dropna(axis=0, how='any')
tmp = mious.drop(['Epoch'], axis=1)
mious[n + '_avg'] = tmp.mean(axis=1)
mious_mean[n + '_avg'] = mious[n + '_avg']
mious.plot(x='Epoch')
plt.savefig("./checkpoint/" + name + version + "/" + n + t + "_iou_vs_epochs.png")
plt.clf()
# plt.close('all')
print(mious)
print(mious_mean)
mious_mean.plot()
plt.savefig("./checkpoint/" + name + version + "/" + name + version + t + "_avg_iou_vs_epochs.png")
plt.clf()
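# Hedged usage sketch (module names are assumptions based on the hard-coded variants above):
#   networks_mean(['FastSCNN', 'FastSCNNG1', 'FastSCNNG3'], name='FastSCNN', version='1',
#                 train_types='bs4gpu1_train')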
def networkXs_mean(module_names, name='FastSCNNX',
train_types=['bs8gpu1_train_False', 'bs8gpu1_train_True']): # compare FastSCNNX variants across train types
datasets = [name + '/' + name + '-{}'.format(i) for i in range(3, 18)]
mious_mean = pd.DataFrame()
# t = train_types
for t in train_types:
for n in module_names:
losses = pd.DataFrame()
#
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from ctypes import (
Structure,
c_ubyte,
c_uint,
c_ulong,
c_ulonglong,
c_ushort,
sizeof,
)
import numpy as np
import pandas as pd
from six.moves import range
_inttypes_map = OrderedDict(sorted([
(sizeof(t) - 1, t) for t in {
c_ubyte,
c_uint,
c_ulong,
c_ulonglong,
c_ushort
}
]))
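# Hedged note: on a typical 64-bit Linux build the keys are byte widths minus one, roughly
# {0: c_ubyte, 1: c_ushort, 3: c_uint, 7: c_ulong or c_ulonglong (both 8 bytes, one overwrites
# the other)}; the exact contents depend on the platform's integer sizes.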
_inttypes = list(
pd.Series(_inttypes_map)
)
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
get_ipython().run_line_magic('matplotlib', 'inline')
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
df = pd.read_csv('../input/kc_house_data.csv')
df.head()
# In[ ]:
# Get Year and Month
df['Year'], df['Month'] = df['date'].str[:4], df['date'].str[4:6]
df.head()
# In[ ]:
# Review info
df.info()
# In[ ]:
plt.figure(figsize=(15,12))
mask = np.zeros_like(df.corr())
mask[np.triu_indices_from(mask)] = True
with sns.axes_style('white'):
ax = sns.heatmap(df.corr(), mask=mask, vmax=.3, annot=True)
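# The upper-triangle mask hides the redundant half of the symmetric correlation matrix,
# so each pairwise correlation is shown (and annotated) only once.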
# In[ ]:
# Split label from X
y = df['price']
X = df.drop('price', axis=1)
# In[ ]:
# Convert yr_renovated to years since renovation
X['sold_year'] = X['date'].apply(lambda x: int(x[:4]))
X['yrs_since_renovated'] = (X['sold_year'] - X['yr_renovated'][X['yr_renovated'] != 0]).fillna(0)
# Create dummy features for zip code
zip_dummies = pd.get_dummies(X['zipcode'], prefix='zipcode')
import pandas as pd
import numpy as np
import math
import os
from scipy.interpolate import interp1d
import time
from sklearn.ensemble import RandomForestRegressor
import xgboost as xgb
from lightgbm import LGBMRegressor
from catboost import CatBoostRegressor
from information_measures import *
from joblib import Parallel, delayed
#from arch import arch_model
def rmspe(y_true, y_pred):
return (np.sqrt(np.mean(np.square((y_true - y_pred) / y_true))))
def log_return(list_stock_prices): # Stock prices are estimated through wap values
return np.log(list_stock_prices).diff()
def realized_volatility(series_log_return):
return np.sqrt(np.sum(series_log_return**2))
def compute_wap(book_pd):
wap = (book_pd['bid_price1'] * book_pd['ask_size1'] + book_pd['ask_price1'] * book_pd['bid_size1']) / (book_pd['bid_size1']+ book_pd['ask_size1'])
return wap
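# Level-1 weighted average price: each side's price is weighted by the opposite side's size,
# wap = (bid_price1*ask_size1 + ask_price1*bid_size1) / (bid_size1 + ask_size1),
# so a large bid queue pulls the estimate toward the ask and vice versa.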
def realized_volatility_from_book_pd(book_stock_time):
wap = compute_wap(book_stock_time)
returns = log_return(wap)
volatility = realized_volatility(returns)
return volatility
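# Worked sketch with made-up numbers: for wap values [1.00, 1.01, 1.00] the log returns are
# [NaN, 0.00995, -0.00995]; squaring, summing (pandas skips the leading NaN) and taking the
# square root gives a realized volatility of roughly 0.0141 for that time_id bucket.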
def realized_volatility_per_time_id(file_path, prediction_column_name):
df_book_data = pd.read_parquet(file_path)
# Estimate stock price per time point
df_book_data['wap'] = compute_wap(df_book_data)
# Compute log return from wap values per time_id
df_book_data['log_return'] = df_book_data.groupby(['time_id'])['wap'].apply(log_return)
df_book_data = df_book_data[~df_book_data['log_return'].isnull()]
# Compute the square root of the sum of log return squared to get realized volatility
df_realized_vol_per_stock = pd.DataFrame(df_book_data.groupby(['time_id'])['log_return'].agg(realized_volatility)).reset_index()
# Formatting
df_realized_vol_per_stock = df_realized_vol_per_stock.rename(columns = {'log_return':prediction_column_name})
stock_id = file_path.split('=')[1]
df_realized_vol_per_stock['row_id'] = df_realized_vol_per_stock['time_id'].apply(lambda x:f'{stock_id}-{x}')
return df_realized_vol_per_stock[['row_id',prediction_column_name]]
def past_realized_volatility_per_stock(list_file,prediction_column_name):
df_past_realized = pd.DataFrame()
for file in list_file:
df_past_realized = pd.concat([df_past_realized,
realized_volatility_per_time_id(file,prediction_column_name)])
return df_past_realized
def stupidForestPrediction(book_path_train,prediction_column_name,train_targets_pd,book_path_test):
naive_predictions_train = past_realized_volatility_per_stock(list_file=book_path_train,prediction_column_name=prediction_column_name)
df_joined_train = train_targets_pd.merge(naive_predictions_train[['row_id','pred']], on = ['row_id'], how = 'left')
X = np.array(df_joined_train['pred']).reshape(-1,1)
y = np.array(df_joined_train['target']).reshape(-1,)
regr = RandomForestRegressor(random_state=0)
regr.fit(X, y)
naive_predictions_test = past_realized_volatility_per_stock(list_file=book_path_test,prediction_column_name='target')
yhat = regr.predict(np.array(naive_predictions_test['target']).reshape(-1,1))
updated_predictions = naive_predictions_test.copy()
updated_predictions['target'] = yhat
return updated_predictions
def garch_fit_predict_volatility(returns_series, N=10000):
model = arch_model(returns_series * N, p=1, q=1)
model_fit = model.fit(update_freq=0, disp='off')
yhat = model_fit.forecast(horizon=600, reindex=False)
pred_volatility = np.sqrt(np.sum(yhat.variance.values)) / N
return pred_volatility
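# Note: arch_model comes from the 'arch' package; its import is commented out near the top of
# this file ('#from arch import arch_model') and has to be re-enabled before the GARCH helpers
# can run. Returns are multiplied by N before fitting and the forecast is divided by N again,
# a common rescaling trick to keep the GARCH(1, 1) optimizer numerically well behaved.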
def garch_volatility_per_time_id(file_path, prediction_column_name):
# read the data
df_book_data = pd.read_parquet(file_path)
# calculate the midprice (not the WAP)
df_book_data['midprice'] =(df_book_data['bid_price1'] + df_book_data['ask_price1'])/2
# keep only the midprice columns for now
df_book_data = df_book_data[['time_id', 'seconds_in_bucket', 'midprice']]
df_book_data = df_book_data.sort_values('seconds_in_bucket')
# make the book updates evenly spaced
df_book_data_evenly = pd.DataFrame({'time_id':np.repeat(df_book_data['time_id'].unique(), 600),
'second':np.tile(range(0,600), df_book_data['time_id'].nunique())})
df_book_data_evenly['second'] = df_book_data_evenly['second'].astype(np.int16)
df_book_data_evenly = df_book_data_evenly.sort_values('second')
df_book_data_evenly = pd.merge_asof(df_book_data_evenly,
df_book_data,
left_on='second',right_on='seconds_in_bucket',
by = 'time_id')
# Ordering for easier use
df_book_data_evenly = df_book_data_evenly[['time_id', 'second', 'midprice']]
df_book_data_evenly = df_book_data_evenly.sort_values(['time_id','second']).reset_index(drop=True)
# calculate log returns
df_book_data_evenly['log_return'] = df_book_data_evenly.groupby(['time_id'])['midprice'].apply(log_return)
df_book_data_evenly = df_book_data_evenly[~df_book_data_evenly['log_return'].isnull()]
# fit GARCH(1, 1) and predict the volatility of returns
df_garch_vol_per_stock = \
pd.DataFrame(df_book_data_evenly.groupby(['time_id'])['log_return'].agg(garch_fit_predict_volatility)).reset_index()
df_garch_vol_per_stock = df_garch_vol_per_stock.rename(columns = {'log_return':prediction_column_name})
# add row_id column to the data
stock_id = file_path.split('=')[1]
df_garch_vol_per_stock['row_id'] = df_garch_vol_per_stock['time_id'].apply(lambda x:f'{stock_id}-{x}')
# return the result
return df_garch_vol_per_stock[['row_id', prediction_column_name]]
def garch_volatility_per_stock(list_file, prediction_column_name):
df_garch_predicted = pd.DataFrame()
for file in list_file:
df_garch_predicted = pd.concat([df_garch_predicted,
garch_volatility_per_time_id(file, prediction_column_name)])
return df_garch_predicted
def entropy_from_book(book_stock_time,last_min):
if last_min < 10:
book_stock_time = book_stock_time[book_stock_time['seconds_in_bucket'] >= (600-last_min*60)]
if book_stock_time.empty == True or book_stock_time.shape[0] < 3:
return 0
wap = compute_wap(book_stock_time)
t_init = book_stock_time['seconds_in_bucket']
t_new = np.arange(np.min(t_init),np.max(t_init))
# Closest neighbour interpolation (no changes in wap between lines)
nearest = interp1d(t_init, wap, kind='nearest')
resampled_wap = nearest(t_new)
# Compute sample entropy
# sampleEntropy = nolds.sampen(resampled_wap)
sampleEntropy = sampen(resampled_wap)
return sampleEntropy
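# The book updates arrive at irregular seconds_in_bucket values, so the wap series is first put
# onto a regular 1-second grid with nearest-neighbour interpolation (treating the price as
# unchanged between updates, as noted above) before the sample entropy is computed.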
def entropy_from_wap(wap,seconds,last_seconds):
if last_seconds < 600:
idx = np.where(seconds >= last_seconds)[0]
if len(idx) < 3:
return 0
else:
wap = wap[idx]
seconds = seconds[idx]
# Closest neighbour interpolation (no changes in wap between lines)
t_new = np.arange(np.min(seconds),np.max(seconds))
nearest = interp1d(seconds, wap, kind='nearest')
resampled_wap = nearest(t_new)
# Compute sample entropy
# sampleEntropy = nolds.sampen(resampled_wap)
sampleEntropy = sampen(resampled_wap)
# sampleEntropy = ApEn_new(resampled_wap,3,0.001)
return sampleEntropy
def linearFit(book_stock_time, last_min):
if last_min < 10:
book_stock_time = book_stock_time[book_stock_time['seconds_in_bucket'] >= (600-last_min*60)]
if book_stock_time.empty == True or book_stock_time.shape[0] < 2:
return 0
wap = np.array(compute_wap(book_stock_time))
t_init = book_stock_time['seconds_in_bucket']
return (wap[-1] - wap[0])/(np.max(t_init) - np.min(t_init))
def wapStat(book_stock_time, last_min):
if last_min < 10:
book_stock_time = book_stock_time[book_stock_time['seconds_in_bucket'] >= (600-last_min*60)]
if book_stock_time.empty == True or book_stock_time.shape[0] < 2:
return 0
wap = compute_wap(book_stock_time)
t_init = book_stock_time['seconds_in_bucket']
t_new = np.arange(np.min(t_init),np.max(t_init))
# Closest neighbour interpolation (no changes in wap between lines)
nearest = interp1d(t_init, wap, kind='nearest')
resampled_wap = nearest(t_new)
return np.std(resampled_wap)
def entropy_Prediction(book_path_train,prediction_column_name,train_targets_pd,book_path_test,all_stocks_ids,test_file):
# Compute features
book_features_encoded_test = computeFeatures_1(book_path_test,'test',test_file,all_stocks_ids)
book_features_encoded_train = computeFeatures_1(book_path_train,'train',train_targets_pd,all_stocks_ids)
X = book_features_encoded_train.drop(['row_id','target','stock_id'],axis=1)
y = book_features_encoded_train['target']
# Modeling
catboost_default = CatBoostRegressor(verbose=0)
catboost_default.fit(X,y)
# Predict
X_test = book_features_encoded_test.drop(['row_id','stock_id'],axis=1)
yhat = catboost_default.predict(X_test)
# Formatting
yhat_pd = pd.DataFrame(yhat,columns=['target'])
predictions = pd.concat([test_file,yhat_pd],axis=1)
return predictions
def computeFeatures_1(book_path,prediction_column_name,train_targets_pd,all_stocks_ids):
book_all_features = pd.DataFrame()
encoder = np.eye(len(all_stocks_ids))
stocks_id_list, row_id_list = [], []
volatility_list, entropy2_list = [], []
linearFit_list, linearFit5_list, linearFit2_list = [], [], []
wap_std_list, wap_std5_list, wap_std2_list = [], [], []
for file in book_path:
start = time.time()
book_stock = pd.read_parquet(file)
stock_id = file.split('=')[1]
print('stock id computing = ' + str(stock_id))
stock_time_ids = book_stock['time_id'].unique()
for time_id in stock_time_ids:
# Access book data at this time + stock
book_stock_time = book_stock[book_stock['time_id'] == time_id]
# Create feature matrix
stocks_id_list.append(stock_id)
row_id_list.append(str(f'{stock_id}-{time_id}'))
volatility_list.append(realized_volatility_from_book_pd(book_stock_time=book_stock_time))
entropy2_list.append(entropy_from_book(book_stock_time=book_stock_time,last_min=2))
linearFit_list.append(linearFit(book_stock_time=book_stock_time,last_min=10))
linearFit5_list.append(linearFit(book_stock_time=book_stock_time,last_min=5))
linearFit2_list.append(linearFit(book_stock_time=book_stock_time,last_min=2))
wap_std_list.append(wapStat(book_stock_time=book_stock_time,last_min=10))
wap_std5_list.append(wapStat(book_stock_time=book_stock_time,last_min=5))
wap_std2_list.append(wapStat(book_stock_time=book_stock_time,last_min=2))
print('Computing one stock entropy took', time.time() - start, 'seconds for stock ', stock_id)
# Merge targets
stocks_id_pd = pd.DataFrame(stocks_id_list,columns=['stock_id'])
row_id_pd = pd.DataFrame(row_id_list,columns=['row_id'])
volatility_pd = pd.DataFrame(volatility_list,columns=['volatility'])
entropy2_pd = pd.DataFrame(entropy2_list,columns=['entropy2'])
linearFit_pd = pd.DataFrame(linearFit_list,columns=['linearFit_coef'])
linearFit5_pd = pd.DataFrame(linearFit5_list,columns=['linearFit_coef5'])
linearFit2_pd = pd.DataFrame(linearFit2_list,columns=['linearFit_coef2'])
wap_std_pd = pd.DataFrame(wap_std_list,columns=['wap_std'])
wap_std5_pd = pd.DataFrame(wap_std5_list,columns=['wap_std5'])
wap_std2_pd = pd.DataFrame(wap_std2_list,columns=['wap_std2'])
book_all_features = pd.concat([stocks_id_pd,row_id_pd,volatility_pd,entropy2_pd,linearFit_pd,linearFit5_pd,linearFit2_pd,
wap_std_pd,wap_std5_pd,wap_std2_pd],axis=1)
# This line makes sure the predictions are aligned with the row_id in the submission file
book_all_features = train_targets_pd.merge(book_all_features, on = ['row_id'])
# Add encoded stock
encoded = list()
for i in range(book_all_features.shape[0]):
stock_id = book_all_features['stock_id'][i]
encoded_stock = encoder[np.where(all_stocks_ids == int(stock_id))[0],:]
encoded.append(encoded_stock)
encoded_pd = pd.DataFrame(np.array(encoded).reshape(book_all_features.shape[0],np.array(all_stocks_ids).shape[0]))
book_all_features_encoded = pd.concat([book_all_features, encoded_pd],axis=1)
return book_all_features_encoded
def calc_wap(df):
return (df['bid_price1'] * df['ask_size1'] + df['ask_price1'] * df['bid_size1']) / (df['bid_size1'] + df['ask_size1'])
def calc_wap2(df):
return (df['bid_price2'] * df['ask_size2'] + df['ask_price2'] * df['bid_size2']) / (df['bid_size2'] + df['ask_size2'])
def calc_wap3(df):
return (df['bid_price2'] * df['bid_size2'] + df['ask_price2'] * df['ask_size2']) / (df['bid_size2'] + df['ask_size2'])
def calc_wap4(df):
return (df['bid_price1'] * df['bid_size1'] + df['ask_price1'] * df['ask_size1']) / (df['bid_size1'] + df['ask_size1'])
def mid_price(df):
return df['bid_price1'] /2 + df['ask_price1'] / 2
def calc_rv_from_wap_numba(values, index):
log_return = np.diff(np.log(values))
realized_vol = np.sqrt(np.sum(np.square(log_return[1:])))
return realized_vol
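# Hedged note: unlike realized_volatility above, this numba-friendly variant also drops the first
# log return (log_return[1:]) before squaring, so it uses one fewer observation per window; it is
# intended to be passed to groupby(...).agg(calc_rv_from_wap_numba, engine='numba') as done below.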
def load_book_data_by_id(stock_id,datapath,train_test):
file_to_read = os.path.join(datapath,'book_' + str(train_test) + str('.parquet'),'stock_id=' + str(stock_id))
df = pd.read_parquet(file_to_read)
return df
def load_trades_data_by_id(stock_id,datapath,train_test):
file_to_read = os.path.join(datapath,'trade_' + str(train_test) + str('.parquet'),'stock_id=' + str(stock_id))
df = pd.read_parquet(file_to_read)
return df
def entropy_from_df(df):
if df.shape[0] < 3:
return 0
t_init = df['seconds_in_bucket']
t_new = np.arange(np.min(t_init),np.max(t_init))
# Closest neighbour interpolation (no changes in wap between lines)
nearest = interp1d(t_init, df['wap'], kind='nearest')
resampled_wap = nearest(t_new)
# Compute sample entropy
# sampleEntropy = nolds.sampen(resampled_wap)
sampleEntropy = sampen(resampled_wap)
return sampleEntropy
def entropy_from_df2(df):
if df.shape[0] < 3:
return 0
t_init = df['seconds_in_bucket']
t_new = np.arange(np.min(t_init),np.max(t_init))
# Closest neighbour interpolation (no changes in wap between lines)
nearest = interp1d(t_init, df['wap2'], kind='nearest')
resampled_wap = nearest(t_new)
# Compute sample entropy
# sampleEntropy = nolds.sampen(resampled_wap)
sampleEntropy = sampen(resampled_wap)
return sampleEntropy
def entropy_from_df3(df):
if df.shape[0] < 3:
return 0
t_init = df['seconds_in_bucket']
t_new = np.arange(np.min(t_init),np.max(t_init))
# Closest neighbour interpolation (no changes in wap between lines)
nearest = interp1d(t_init, df['wap3'], kind='nearest')
resampled_wap = nearest(t_new)
# Compute sample entropy
sampleEntropy = sampen(resampled_wap)
return sampleEntropy
def financial_metrics(df):
wap_imbalance = np.mean(df['wap'] - df['wap2'])
price_spread = np.mean((df['ask_price1'] - df['bid_price1']) / ((df['ask_price1'] + df['bid_price1'])/2))
bid_spread = np.mean(df['bid_price1'] - df['bid_price2'])
ask_spread = np.mean(df['ask_price1'] - df['ask_price2']) # note: the absolute value still needs to be taken here
total_volume = np.mean((df['ask_size1'] + df['ask_size2']) + (df['bid_size1'] + df['bid_size2']))
volume_imbalance = np.mean(abs((df['ask_size1'] + df['ask_size2']) - (df['bid_size1'] + df['bid_size2'])))
return [wap_imbalance,price_spread,bid_spread,ask_spread,total_volume,volume_imbalance]
def financial_metrics_2(df):
wap_imbalance = df['wap'] - df['wap2']
price_spread = (df['ask_price1'] - df['bid_price1']) / ((df['ask_price1'] + df['bid_price1'])/2)
bid_spread = df['bid_price1'] - df['bid_price2']
ask_spread = df['ask_price1'] - df['ask_price2'] # Abs to take
total_volume = (df['ask_size1'] + df['ask_size2']) + (df['bid_size1'] + df['bid_size2'])
volume_imbalance = abs((df['ask_size1'] + df['ask_size2']) - (df['bid_size1'] + df['bid_size2']))
# New features here
wap_imbalance_mean = np.mean(wap_imbalance)
wap_imbalance_sum = np.sum(wap_imbalance)
wap_imbalance_std = np.std(wap_imbalance)
wap_imbalance_max = np.max(wap_imbalance)
wap_imbalance_min = np.min(wap_imbalance)
price_spread_mean = np.mean(price_spread)
price_spread_sum = np.sum(price_spread)
price_spread_std = np.std(price_spread)
price_spread_max = np.max(price_spread)
price_spread_min = np.min(price_spread)
bid_spread_mean = np.mean(bid_spread)
bid_spread_sum = np.sum(bid_spread)
bid_spread_std = np.std(bid_spread)
bid_spread_max = np.max(bid_spread)
bid_spread_min = np.min(bid_spread)
ask_spread_mean = np.mean(ask_spread)
ask_spread_sum = np.sum(ask_spread)
ask_spread_std = np.std(ask_spread)
ask_spread_max = np.max(ask_spread)
ask_spread_min = np.min(ask_spread)
total_volume_mean = np.mean(total_volume)
total_volume_sum = np.sum(total_volume)
total_volume_std = np.std(total_volume)
total_volume_max = np.max(total_volume)
total_volume_min = np.min(total_volume)
volume_imbalance_mean = np.mean(volume_imbalance)
volume_imbalance_sum = np.sum(volume_imbalance)
volume_imbalance_std = np.std(volume_imbalance)
volume_imbalance_max = np.max(volume_imbalance)
volume_imbalance_min = np.min(volume_imbalance)
return [wap_imbalance_mean, price_spread_mean, bid_spread_mean, ask_spread_mean, total_volume_mean, volume_imbalance_mean,
wap_imbalance_sum, price_spread_sum, bid_spread_sum, ask_spread_sum, total_volume_sum, volume_imbalance_sum,
wap_imbalance_std, price_spread_std, bid_spread_std, ask_spread_std, total_volume_std, volume_imbalance_std,
wap_imbalance_max, price_spread_max, bid_spread_max, ask_spread_max, total_volume_max, volume_imbalance_max,
wap_imbalance_min, price_spread_min, bid_spread_min, ask_spread_min, total_volume_min, volume_imbalance_min]
def other_metrics(df):
if df.shape[0] < 2:
linearFit = 0
linearFit2 = 0
linearFit3 = 0
std_1 = 0
std_2 = 0
std_3 = 0
else:
linearFit = (df['wap'].iloc[-1] - df['wap'].iloc[0]) / ((np.max(df['seconds_in_bucket']) - np.min(df['seconds_in_bucket'])))
linearFit2 = (df['wap2'].iloc[-1] - df['wap2'].iloc[0]) / ((np.max(df['seconds_in_bucket']) - np.min(df['seconds_in_bucket'])))
linearFit3 = (df['wap3'].iloc[-1] - df['wap3'].iloc[0]) / ((np.max(df['seconds_in_bucket']) - np.min(df['seconds_in_bucket'])))
# Resampling
t_init = df['seconds_in_bucket']
t_new = np.arange(np.min(t_init),np.max(t_init))
# Closest neighbour interpolation (no changes in wap between lines)
nearest = interp1d(t_init, df['wap'], kind='nearest')
nearest2 = interp1d(t_init, df['wap2'], kind='nearest')
nearest3 = interp1d(t_init, df['wap3'], kind='nearest')
std_1 = np.std(nearest(t_new))
std_2 = np.std(nearest2(t_new))
std_3 = np.std(nearest3(t_new))
return [linearFit, linearFit2, linearFit3, std_1, std_2, std_3]
def load_book_data_by_id_kaggle(stock_id,train_test):
df = pd.read_parquet(f'../input/optiver-realized-volatility-prediction/book_{train_test}.parquet/stock_id={stock_id}')
return df
def load_trades_data_by_id_kaggle(stock_id,train_test):
df = pd.read_parquet(f'../input/optiver-realized-volatility-prediction/trade_{train_test}.parquet/stock_id={stock_id}')
return df
def computeFeatures_wEntropy(machine, dataset, all_stocks_ids, datapath):
list_rv, list_rv2, list_rv3 = [], [], []
list_ent, list_fin, list_fin2 = [], [], []
list_others, list_others2, list_others3 = [], [], []
for stock_id in range(127):
start = time.time()
if machine == 'local':
try:
book_stock = load_book_data_by_id(stock_id,datapath,dataset)
except:
continue
elif machine == 'kaggle':
try:
book_stock = load_book_data_by_id_kaggle(stock_id,dataset)
except:
continue
# All time_ids available for this stock
all_time_ids_byStock = book_stock['time_id'].unique()
# Calculate wap for the book
book_stock['wap'] = calc_wap(book_stock)
book_stock['wap2'] = calc_wap2(book_stock)
book_stock['wap3'] = calc_wap3(book_stock)
# Calculate realized volatility
df_sub = book_stock.groupby('time_id')['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2 = book_stock.groupby('time_id')['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3 = book_stock.groupby('time_id')['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub['time_id']]
df_sub = pd.concat([df_sub,df_sub2['wap2'],df_sub3['wap3']],axis=1)
df_sub = df_sub.rename(columns={'time_id':'row_id','wap': 'rv', 'wap2': 'rv2', 'wap3': 'rv3'})
# Calculate realized volatility last 5 min
isEmpty = book_stock.query(f'seconds_in_bucket >= 300').empty
if isEmpty == False:
df_sub_5 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id'])['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2_5 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id'])['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3_5 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id'])['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub_5['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_5['time_id']]
df_sub_5 = pd.concat([df_sub_5,df_sub2_5['wap2'],df_sub3_5['wap3']],axis=1)
df_sub_5 = df_sub_5.rename(columns={'time_id':'row_id','wap': 'rv_5', 'wap2': 'rv2_5', 'wap3': 'rv3_5'})
else: # 0 volatility
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
zero_rv = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv_5'])
zero_rv2 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv2_5'])
zero_rv3 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv3_5'])
df_sub_5 = pd.concat([times_pd,zero_rv,zero_rv2,zero_rv3],axis=1)
# Calculate realized volatility last 2 min
isEmpty = book_stock.query(f'seconds_in_bucket >= 480').empty
if isEmpty == False:
df_sub_2 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id'])['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2_2 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id'])['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3_2 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id'])['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub_2['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_2['time_id']]
df_sub_2 = pd.concat([df_sub_2,df_sub2_2['wap2'],df_sub3_2['wap3']],axis=1)
df_sub_2 = df_sub_2.rename(columns={'time_id':'row_id','wap': 'rv_2', 'wap2': 'rv2_2', 'wap3': 'rv3_2'})
else: # 0 volatility
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
zero_rv = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv_2'])
zero_rv2 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv2_2'])
zero_rv3 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv3_2'])
df_sub_2 = pd.concat([times_pd,zero_rv,zero_rv2,zero_rv3],axis=1)
list_rv.append(df_sub)
list_rv2.append(df_sub_5)
list_rv3.append(df_sub_2)
# Calculate other financial metrics from book
df_sub_book_feats = book_stock.groupby(['time_id']).apply(financial_metrics).to_frame().reset_index()
df_sub_book_feats = df_sub_book_feats.rename(columns={0:'embedding'})
df_sub_book_feats[['wap_imbalance','price_spread','bid_spread','ask_spread','total_vol','vol_imbalance']] = pd.DataFrame(df_sub_book_feats.embedding.tolist(), index=df_sub_book_feats.index)
df_sub_book_feats['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_book_feats['time_id']]
df_sub_book_feats = df_sub_book_feats.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
isEmpty = book_stock.query(f'seconds_in_bucket >= 300').empty
if isEmpty == False:
df_sub_book_feats5 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id']).apply(financial_metrics).to_frame().reset_index()
df_sub_book_feats5 = df_sub_book_feats5.rename(columns={0:'embedding'})
df_sub_book_feats5[['wap_imbalance5','price_spread5','bid_spread5','ask_spread5','total_vol5','vol_imbalance5']] = pd.DataFrame(df_sub_book_feats5.embedding.tolist(), index=df_sub_book_feats5.index)
df_sub_book_feats5['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_book_feats5['time_id']]
df_sub_book_feats5 = df_sub_book_feats5.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
else:
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
temp = pd.DataFrame([0],columns=['wap_imbalance5'])
temp2 = pd.DataFrame([0],columns=['price_spread5'])
temp3 = pd.DataFrame([0],columns=['bid_spread5'])
temp4 = pd.DataFrame([0],columns=['ask_spread5'])
temp5 = pd.DataFrame([0],columns=['total_vol5'])
temp6 = pd.DataFrame([0],columns=['vol_imbalance5'])
df_sub_book_feats5 = pd.concat([times_pd,temp,temp2,temp3,temp4,temp5,temp6],axis=1)
list_fin.append(df_sub_book_feats)
list_fin2.append(df_sub_book_feats5)
# Compute entropy
isEmpty = book_stock.query(f'seconds_in_bucket >= 480').empty
    if not isEmpty:
df_ent = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id']).apply(entropy_from_df).to_frame().reset_index().fillna(0)
df_ent2 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id']).apply(entropy_from_df2).to_frame().reset_index().fillna(0)
df_ent3 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id']).apply(entropy_from_df3).to_frame().reset_index().fillna(0)
df_ent['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_ent['time_id']]
df_ent = df_ent.rename(columns={'time_id':'row_id',0:'entropy'})
df_ent2 = df_ent2.rename(columns={0:'entropy2'}).drop(['time_id'],axis=1)
df_ent3 = df_ent3.rename(columns={0:'entropy3'}).drop(['time_id'],axis=1)
df_ent = pd.concat([df_ent,df_ent2,df_ent3],axis=1)
else:
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
temp = pd.DataFrame([0],columns=['entropy'])
temp2 = pd.DataFrame([0],columns=['entropy2'])
temp3 = pd.DataFrame([0],columns=['entropy3'])
df_ent = pd.concat([times_pd,temp,temp2,temp3],axis=1)
list_ent.append(df_ent)
# Compute other metrics
df_others = book_stock.groupby(['time_id']).apply(other_metrics).to_frame().reset_index().fillna(0)
df_others = df_others.rename(columns={0:'embedding'})
df_others[['linearFit1_1','linearFit1_2','linearFit1_3','wap_std1_1','wap_std1_2','wap_std1_3']] = pd.DataFrame(df_others.embedding.tolist(), index=df_others.index)
df_others['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_others['time_id']]
df_others = df_others.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
list_others.append(df_others)
isEmpty = book_stock.query(f'seconds_in_bucket >= 300').empty
    if not isEmpty:
df_others2 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id']).apply(other_metrics).to_frame().reset_index().fillna(0)
df_others2 = df_others2.rename(columns={0:'embedding'})
df_others2[['linearFit2_1','linearFit2_2','linearFit2_3','wap_std2_1','wap_std2_2','wap_std2_3']] = pd.DataFrame(df_others2.embedding.tolist(), index=df_others2.index)
df_others2['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_others2['time_id']]
df_others2 = df_others2.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
else:
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
temp = pd.DataFrame([0],columns=['linearFit2_1'])
temp2 = pd.DataFrame([0],columns=['linearFit2_2'])
temp3 = pd.DataFrame([0],columns=['linearFit2_3'])
temp4 = pd.DataFrame([0],columns=['wap_std2_1'])
temp5 = pd.DataFrame([0],columns=['wap_std2_2'])
temp6 = pd.DataFrame([0],columns=['wap_std2_3'])
df_others2 = pd.concat([times_pd,temp,temp2,temp3,temp4,temp5,temp6],axis=1)
list_others2.append(df_others2)
isEmpty = book_stock.query(f'seconds_in_bucket >= 480').empty
    if not isEmpty:
df_others3 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id']).apply(other_metrics).to_frame().reset_index().fillna(0)
df_others3 = df_others3.rename(columns={0:'embedding'})
df_others3[['linearFit3_1','linearFit3_2','linearFit3_3','wap_std3_1','wap_std3_2','wap_std3_3']] = pd.DataFrame(df_others3.embedding.tolist(), index=df_others3.index)
df_others3['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_others3['time_id']]
df_others3 = df_others3.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
else:
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
temp = pd.DataFrame([0],columns=['linearFit3_1'])
temp2 = pd.DataFrame([0],columns=['linearFit3_2'])
temp3 = pd.DataFrame([0],columns=['linearFit3_3'])
temp4 = pd.DataFrame([0],columns=['wap_std3_1'])
temp5 = pd.DataFrame([0],columns=['wap_std3_2'])
temp6 = pd.DataFrame([0],columns=['wap_std3_3'])
df_others3 = pd.concat([times_pd,temp,temp2,temp3,temp4,temp5,temp6],axis=1)
list_others3.append(df_others3)
print('Computing one stock took', time.time() - start, 'seconds for stock ', stock_id)
# Create features dataframe
df_submission = pd.concat(list_rv)
df_submission2 = pd.concat(list_rv2)
df_submission3 = pd.concat(list_rv3)
df_ent_concat = pd.concat(list_ent)
df_fin_concat = pd.concat(list_fin)
df_fin2_concat = pd.concat(list_fin2)
df_others = pd.concat(list_others)
df_others2 = pd.concat(list_others2)
df_others3 = pd.concat(list_others3)
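# The three "0 volatility" branches above repeat the same construction with a
# different column suffix for each window. A small helper of this shape (an
# illustrative refactor, not part of the original notebook) would keep that
# logic in one place:
def zero_rv_frame(all_time_ids, stock_id, suffix):
    """All-zero realized-volatility frame for one stock and one window suffix."""
    out = pd.DataFrame({'row_id': [f'{stock_id}-{t}' for t in all_time_ids]})
    for col in (f'rv{suffix}', f'rv2{suffix}', f'rv3{suffix}'):
        out[col] = 0.0
    return out
# e.g. zero_rv_frame(all_time_ids_byStock, stock_id, '_5') stands in for the
# manual times_pd / zero_rv / pd.concat block of the 5-minute window.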
# for data manipulation
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# for visualization
from matplotlib import pyplot as plt
# to include graphs inline within the frontends next to code
import seaborn as sns
# preprocessing functions and evaluation models
from sklearn.preprocessing import OneHotEncoder, MinMaxScaler, StandardScaler
from sklearn.model_selection import train_test_split, cross_val_score, GridSearchCV,RandomizedSearchCV
from sklearn.pipeline import Pipeline
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.dummy import DummyClassifier
# machine learning models
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier,ExtraTreesClassifier
from xgboost import XGBClassifier
submission_sample = pd.read_csv('../input/forest-cover-type-prediction/sampleSubmission.csv')
train = pd.read_csv('../input/forest-cover-type-prediction/train.csv')
test = pd.read_csv('../input/forest-cover-type-prediction/test.csv')
print("Number of rows and columns in the train dataset are:", train.shape)
train.head()
train.tail()
train.dtypes
print(list(enumerate(train.columns)))
train.iloc[:,1:10].describe()
train.nunique()
train.isna().sum()
def outlier_function(df, col_name):
    ''' This function detects the first and third quartiles and the interquartile range for a given column of a dataframe,
    then calculates upper and lower limits to determine outliers conservatively.
    Returns the lower limit, the upper limit, and the number of outliers, respectively.
    '''
first_quartile = np.percentile(np.array(df[col_name].tolist()), 25)
third_quartile = np.percentile(np.array(df[col_name].tolist()), 75)
IQR = third_quartile - first_quartile
upper_limit = third_quartile+(3*IQR)
lower_limit = first_quartile-(3*IQR)
outlier_count = 0
for value in df[col_name].tolist():
if (value < lower_limit) | (value > upper_limit):
outlier_count +=1
return lower_limit, upper_limit, outlier_count
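# A vectorised equivalent using pandas quantiles (illustrative alternative; the
# loop below keeps using outlier_function as written):
def outlier_function_vectorized(df, col_name):
    first_quartile = df[col_name].quantile(0.25)
    third_quartile = df[col_name].quantile(0.75)
    iqr = third_quartile - first_quartile
    lower_limit = first_quartile - 3 * iqr
    upper_limit = third_quartile + 3 * iqr
    outlier_count = int(((df[col_name] < lower_limit) | (df[col_name] > upper_limit)).sum())
    return lower_limit, upper_limit, outlier_count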
# loop through all columns to see if there are any outliers
for i in train.columns:
if outlier_function(train, i)[2] > 0:
print("There are {} outliers in {}".format(outlier_function(train, i)[2], i))
trees = train[(train['Horizontal_Distance_To_Fire_Points'] > outlier_function(train, 'Horizontal_Distance_To_Fire_Points')[0]) &
(train['Horizontal_Distance_To_Fire_Points'] < outlier_function(train, 'Horizontal_Distance_To_Fire_Points')[1])]
trees.shape
size=10
Uni = []
for i in range(size+1,len(train.columns)-1):
    Uni.append(pd.unique(train[train.columns[i]].values))
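# The same per-column unique values can be collected in one pass with a dict
# comprehension keyed by column name (illustrative equivalent of the loop above):
unique_values = {col: pd.unique(train[col].values)
                 for col in train.columns[size + 1:len(train.columns) - 1]}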
# First Import Libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import warnings
# Read CSV File For RAW Heating And Electrical Consumption Pattern From Scraped Data
df = pd.read_csv("C:\\Users\\PowerMan\\Desktop\\KASR\Final\\Code and data\\Data\\Whole_scraped_data\\Total-Load-Whole-Premise.csv")
# Delete additional datas in RAW File
# Delete Extra Columns
df.drop('Date Start',axis=1,inplace=True)
df.drop('(Weekdays) or (Weekends and Holidays)',axis=1,inplace=True)
df.drop('Demand',axis=1,inplace=True)
# Delete Extra Raws
# Create an index to filter raws faster than normal for loop!
df['keep']=np.where( df['City'] == 'City', 1, 0)
df=df[df['keep']==0]
# Delete Auxilary index Column
df.drop('keep',axis=1,inplace=True)
# Change data type to float
df[['HR1','HR2','HR3','HR4','HR5','HR6','HR7','HR8','HR9','HR10','HR11','HR12','HR13','HR14','HR15','HR16','HR17','HR18','HR19','HR20','HR21','HR22','HR23','HR24']] = df[['HR1','HR2','HR3','HR4','HR5','HR6','HR7','HR8','HR9','HR10','HR11','HR12','HR13','HR14','HR15','HR16','HR17','HR18','HR19','HR20','HR21','HR22','HR23','HR24']].astype('float64')
# Convert Sqft To Sqm Sqft/10.764=Sqm
sqft_coef = 10.76391041671
#Convert wh/Sqft To wh/Sqm
df.loc[:,'HR1':'HR24']=df.loc[:,'HR1':'HR24']/(sqft_coef)
# Create 2 Column for State And City
df['city_name'] = df.iloc[:, 0]
df['city_state'] = df.iloc[:, 0]
# Split City and State
split_city = df.iloc[:, 0].str.split(',', n=1, expand=True)
df['city_name'] = split_city[0]
df['city_state'] = split_city[1]
# Save File step1
df.to_csv(path_or_buf="C:\\Users\\PowerMan\\Desktop\\KASR\\Final\\Code and data\\Codes\\datamunging\\step1.csv")
########################################################################################################
########################################################################################################
########################################################################################################
# Read New Modified CSV File
df = pd.read_csv("C:\\Users\\PowerMan\\Desktop\\KASR\\Final\\Code and data\\Codes\\datamunging\\step1.csv")
# Delete Old Index Column
df.drop('Unnamed: 0',axis=1,inplace=True)
#Create Day index
day=[]
for i in range (1,331):
for j in range (1,366):
day.append(j)
day_no=pd.Series(day)
df['day_num']=day_no
#Load Cities List
ct = pd.read_csv("C:\\Users\\PowerMan\\Desktop\\KASR\\Final\\Code and data\\Codes\\datamunging\\sourcecity.csv")
df['climate']= df['city_name']
df['city_no']= df['city_name']
df[['city_name','city_no','climate']] = df[['city_name','city_no','climate']].astype(str)
climate=[]
city_no=[]
#Extract Data from source ct climate and city numbers
for i in range (0,120450):
cit = str(df.iloc[i, 28])
cit=cit.lower()
for j in range (0,55):
ctt=ct.iloc[j, 2]
ctt=ctt.lower()
if cit==ctt:
df.iloc[i,30] = ct.iloc[j, 3]
df.iloc[i,31] = ct.iloc[j, 0]
# Save File step2
df.to_csv(path_or_buf="C:\\Users\\PowerMan\\Desktop\\KASR\\Final\\Code and data\\Codes\\datamunging\\step2.csv")
##################################################################################################
# Read New Modified CSV File
df = pd.read_csv("C:\\Users\\PowerMan\\Desktop\\KASR\\Final\\Code and data\\Codes\\datamunging\\step2.csv")
# Delete Old Index Column
df.drop('Unnamed: 0',axis=1,inplace=True)
# Delete Extra Column
df.drop('city_no',axis=1,inplace=True)
# Delete Extra Column
df.drop('City',axis=1,inplace=True)
# Delete Extra Column
df.drop('city_state',axis=1,inplace=True)
# Rearreng colmns
data_colm=['date','load_type','building_type','1','2','3','4','5','6','7','8','9','10','11','12','13','14','15','16','17','18','19','20','21','22','23','24','city','climate','city_no']
df.columns=data_colm
#Create Day index
day=[]
for i in range (1,331):
for j in range (1,366):
day.append(j)
day_no=pd.Series(day)
df['day_num']=day_no
# Create A Column For Each Day Sum Of Loads
df['sum_day']= df.loc[:, '1':'24'].sum(axis=1)
# Sort Columns
new_order = [27,29,28,0,30,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,31]
df = df[df.columns[new_order]]
# Change Name of Load Types
df['load_type'] = df.load_type.map({'Electric':'Energy','Fossil Fuel':'Electric'})
# Save File step3
df.to_csv(path_or_buf="C:\\Users\\PowerMan\\Desktop\\KASR\\Final\\Code and data\\Codes\\datamunging\\step3.csv")
#######################################################################################
# Step 3
# Read New Modified CSV File
df = pd.read_csv("C:\\Users\\PowerMan\\Desktop\\KASR\\Final\\Code and data\\Codes\\datamunging\\step3.csv")
# Delete Old Index Column
df.drop('Unnamed: 0',axis=1,inplace=True)
# Reduce File size
df['building_type']=df.building_type.map({'Office, Large':4,'Office, Medium':3,'Office, Small':1})
# Seprate Electrical and Energy
en=df.loc[df.load_type=='Energy'].copy()
el=df.loc[df.load_type=='Electric'].copy()
he=df.loc[df.load_type=='Electric'].copy()
he['load_type'] = he.load_type.map({'Electric':'Heat'})
he.drop('sum_day',axis=1,inplace=True)
# Save File step4
en.to_csv(path_or_buf="C:\\Users\\PowerMan\\Desktop\\KASR\\Final\\Code and data\\Codes\\datamunging\\step4en.csv")
el.to_csv(path_or_buf="C:\\Users\\PowerMan\\Desktop\\KASR\\Final\\Code and data\\Codes\\datamunging\\step4el.csv")
he.to_csv(path_or_buf="C:\\Users\\PowerMan\\Desktop\\KASR\\Final\\Code and data\\Codes\\datamunging\\step4he.csv")
###############################################################################################################
# Read New Modified CSV File
he = pd.read_csv("C:\\Users\\PowerMan\\Desktop\\KASR\\Final\\Code and data\\Codes\\datamunging\\step4he.csv")
# Delete Old Index Column
he.drop('Unnamed: 0',axis=1,inplace=True)
# Clip negative heating loads to zero
he.iloc[:, 7:31] = he.iloc[:, 7:31].clip(lower=0)
he['sum_day']= he.loc[:, '1':'24'].sum(axis=1)
# Save File step5
he.to_csv(path_or_buf="C:\\Users\\PowerMan\\Desktop\\KASR\\Final\\Code and data\\Codes\\datamunging\\step5he.csv")
############################################################################################################
# Step 6
# Read New Modified CSV File
he = pd.read_csv("C:\\Users\\PowerMan\\Desktop\\KASR\\Final\\Code and data\\Codes\\datamunging\\step5he.csv")
el = pd.read_csv("C:\\Users\\PowerMan\\Desktop\\KASR\\Final\\Code and data\\Codes\\datamunging\\step5el.csv")
#en = pd.read_csv("C:\\Users\\PowerMan\\Desktop\\KASR\\Data\\Pandas\\RAW_Mannaged\\step5en.csv")
# Delete Old Index Column
he.drop('Unnamed: 0',axis=1,inplace=True)
el.drop('Unnamed: 0',axis=1,inplace=True)
alll = pd.read_csv("C:\\Users\\PowerMan\\Desktop\\KASR\\Final\\Code and data\\Codes\\datamunging\\annuallysum.csv")
sumey1=[]
sumey3=[]
sumey4=[]
sumhy1=[]
sumhy3=[]
sumhy4=[]
for j in range (0,165,3):
c = j * 365
jj=j/3
jj=int(jj)
sumey1.append(0)
sumey3.append(0)
sumey4.append(0)
sumhy1.append(0)
sumhy3.append(0)
sumhy4.append(0)
city_name = el.iloc[ c, 0]
city_climate=el.iloc[ c,2]
city_number=el.iloc[ c,1]
alll.iloc[j,1]= city_name
alll.iloc[j,2]= city_climate
alll.iloc[j,0]= city_number
alll.iloc[j+1, 1] = city_name
alll.iloc[j+1, 2] = city_climate
alll.iloc[j+1, 0] = city_number
alll.iloc[j+2, 1] = city_name
alll.iloc[j+2, 2] = city_climate
alll.iloc[j+2, 0] = city_number
alll.iloc[j, 3] = 1
alll.iloc[j+1, 3] = 3
alll.iloc[j+2, 3] = 4
for i in range (0,360):
sumey4[jj]= el.iloc[i+c, 31] + sumey4[jj]
sumey3[jj]= el.iloc[i+c + 1, 31] + sumey3[jj]
sumey1[jj]= el.iloc[i+c+ 2, 31] + sumey1[jj]
sumhy4[jj]= he.iloc[i+c, 31] + sumhy4[jj]
sumhy3[jj]= he.iloc[i+c + 1, 31] + sumhy3[jj]
sumhy1[jj]= he.iloc[i+c + 2, 31] + sumhy1[jj]
alll.iloc[j, 4] = sumhy1[jj]
alll.iloc[j + 1, 4] = sumhy3[jj]
alll.iloc[j + 2, 4] = sumhy4[jj]
alll.iloc[j, 5] = sumey1[jj]
alll.iloc[j + 1, 5] = sumey3[jj]
alll.iloc[j + 2, 5] = sumey4[jj]
alll.iloc[j, 6] = sumey1[jj]+sumhy1[jj]
alll.iloc[j + 1, 6] = sumey3[jj]+sumhy3[jj]
alll.iloc[j + 2, 6] = sumey4[jj]+sumhy4[jj]
alll.loc[:,'heat':'energy']=alll.loc[:,'heat':'energy']/(1000)
######################################################################################################
# step 7
warnings.simplefilter(action='ignore', category=FutureWarning)
# Read New Modified CSV File
he = pd.read_csv("C:\\Users\\PowerMan\\Desktop\\KASR\\Data\\Pandas\\RAW_Mannaged\\he_sorted_971012_1054.csv")
el = pd.read_csv("C:\\Users\\PowerMan\\Desktop\\KASR\\Data\\Pandas\\RAW_Mannaged\\el_sorted_971012_1054.csv")
coef = pd.read_csv("C:\\Users\\PowerMan\\Desktop\\KASR\\Data\\Pandas\\Eng_Analysed\\Coefficents.csv")
# Delete Old Index Column
he.drop('Unnamed: 0',axis=1,inplace=True)
el.drop('Unnamed: 0',axis=1,inplace=True)
el0B = el.loc[(el.climate=='1A')&(el.city=='Miami')]
el1A = el.loc[(el.climate=='1A')&(el.city=='Miami')]
el1B = el.loc[(el.climate=='1A')&(el.city=='Miami')]
el2A = el.loc[(el.climate=='2A')&(el.city=='Austin')]
el2B = el.loc[(el.climate=='2B')&(el.city=='Phoenix')]
el3A = el.loc[(el.climate=='3A')&(el.city=='Charlotte')]
el3B = el.loc[(el.climate=='3B')&(el.city=='Las Vegas')]
el4A = el.loc[(el.climate=='4A')&(el.city=='Newark')]
el4B = el.loc[(el.climate=='4B')&(el.city=='Amarillo')]
el4C = el.loc[(el.climate=='4C')&(el.city=='Medford')]
el5A = el.loc[(el.climate=='5A')&(el.city=='Cleveland')]
el5C = el.loc[(el.climate=='5A')&(el.city=='Cleveland')]
el0B .loc[:,'1':'24']=el0B .loc[:,'1':'24']*(coef.iloc[0,2])
el1A .loc[:,'1':'24']=el1A .loc[:,'1':'24']*(coef.iloc[1,2])
el1B .loc[:,'1':'24']=el1B .loc[:,'1':'24']*(coef.iloc[2,2])
el2A .loc[:,'1':'24']=el2A .loc[:,'1':'24']*(coef.iloc[3,2])
el2B .loc[:,'1':'24']=el2B .loc[:,'1':'24']*(coef.iloc[4,2])
el3A .loc[:,'1':'24']=el3A .loc[:,'1':'24']*(coef.iloc[5,2])
el3B .loc[:,'1':'24']=el3B .loc[:,'1':'24']*(coef.iloc[6,2])
el4A .loc[:,'1':'24']=el4A .loc[:,'1':'24']*(coef.iloc[7,2])
el4B .loc[:,'1':'24']=el4B .loc[:,'1':'24']*(coef.iloc[8,2])
el4C .loc[:,'1':'24']=el4C .loc[:,'1':'24']*(coef.iloc[9,2])
el5A .loc[:,'1':'24']=el5A .loc[:,'1':'24']*(coef.iloc[10,2])
el5C .loc[:,'1':'24']=el5C .loc[:,'1':'24']*(coef.iloc[11,2])
he0B = he.loc[(he.climate=='1A')&(he.city=='Miami')]
he1A = he.loc[(he.climate=='1A')&(he.city=='Miami')]
he1B = he.loc[(he.climate=='1A')&(he.city=='Miami')]
he2A = he.loc[(he.climate=='2A')&(he.city=='Austin')]
he2B = he.loc[(he.climate=='2B')&(he.city=='Phoenix')]
he3A = he.loc[(he.climate=='3A')&(he.city=='Charlotte')]
he3B = he.loc[(he.climate=='3B')&(he.city=='Las Vegas')]
he4A = he.loc[(he.climate=='4A')&(he.city=='Newark')]
he4B = he.loc[(he.climate=='4B')&(he.city=='Amarillo')]
he4C = he.loc[(he.climate=='4C')&(he.city=='Medford')]
he5A = he.loc[(he.climate=='5A')&(he.city=='Cleveland')]
he5C = he.loc[(he.climate=='5A')&(he.city=='Cleveland')]
he0B .loc[:,'1':'24']=he0B .loc[:,'1':'24']*(coef.iloc[0,3])
he1A .loc[:,'1':'24']=he1A .loc[:,'1':'24']*(coef.iloc[1,3])
he1B .loc[:,'1':'24']=he1B .loc[:,'1':'24']*(coef.iloc[2,3])
he2A .loc[:,'1':'24']=he2A .loc[:,'1':'24']*(coef.iloc[3,3])
he2B .loc[:,'1':'24']=he2B .loc[:,'1':'24']*(coef.iloc[4,3])
he3A .loc[:,'1':'24']=he3A .loc[:,'1':'24']*(coef.iloc[5,3])
he3B .loc[:,'1':'24']=he3B .loc[:,'1':'24']*(coef.iloc[6,3])
he4A .loc[:,'1':'24']=he4A .loc[:,'1':'24']*(coef.iloc[7,3])
he4B .loc[:,'1':'24']=he4B .loc[:,'1':'24']*(coef.iloc[8,3])
he4C .loc[:,'1':'24']=he4C .loc[:,'1':'24']*(coef.iloc[9,3])
he5A .loc[:,'1':'24']=he5A .loc[:,'1':'24']*(coef.iloc[10,3])
he5C .loc[:,'1':'24']=he5C .loc[:,'1':'24']*(coef.iloc[11,3])
el0B['climate'] = el0B.climate.map({'1A':'0B'})
el1A['climate'] = el1A.climate.map({'1A':'1B'})
el5C['climate'] = el5C.climate.map({'5A':'5C'})
he0B['climate'] = he0B.climate.map({'1A':'0B'})
he1A['climate'] = he1A.climate.map({'1A':'1B'})
he5C['climate'] = he5C.climate.map({'5A':'5C'})
el10B = el0B.loc[(el0B.building_type==1)]
el11A = el1A.loc[(el1A.building_type==1)]
el11B = el1B.loc[(el1B.building_type==1)]
el12A = el2A.loc[(el2A.building_type==1)]
el12B = el2B.loc[(el2B.building_type==1)]
el13A = el3A.loc[(el3A.building_type==1)]
el13B = el3B.loc[(el3B.building_type==1)]
el14A = el4A.loc[(el4A.building_type==1)]
el14B = el4B.loc[(el4B.building_type==1)]
el14C = el4C.loc[(el4C.building_type==1)]
el15A = el5A.loc[(el5A.building_type==1)]
el15C = el5C.loc[(el5C.building_type==1)]
he10B = he0B.loc[(he0B.building_type==1)]
he11A = he1A.loc[(he1A.building_type==1)]
he11B = he1B.loc[(he1B.building_type==1)]
he12A = he2A.loc[(he2A.building_type==1)]
he12B = he2B.loc[(he2B.building_type==1)]
he13A = he3A.loc[(he3A.building_type==1)]
he13B = he3B.loc[(he3B.building_type==1)]
he14A = he4A.loc[(he4A.building_type==1)]
he14B = he4B.loc[(he4B.building_type==1)]
he14C = he4C.loc[(he4C.building_type==1)]
he15A = he5A.loc[(he5A.building_type==1)]
he15C = he5C.loc[(he5C.building_type==1)]
el30B = el0B.loc[(el0B.building_type==3)]
el31A = el1A.loc[(el1A.building_type==3)]
el31B = el1B.loc[(el1B.building_type==3)]
el32A = el2A.loc[(el2A.building_type==3)]
el32B = el2B.loc[(el2B.building_type==3)]
el33A = el3A.loc[(el3A.building_type==3)]
el33B = el3B.loc[(el3B.building_type==3)]
el34A = el4A.loc[(el4A.building_type==3)]
el34B = el4B.loc[(el4B.building_type==3)]
el34C = el4C.loc[(el4C.building_type==3)]
el35A = el5A.loc[(el5A.building_type==3)]
el35C = el5C.loc[(el5C.building_type==3)]
he30B = he0B.loc[(he0B.building_type==3)]
he31A = he1A.loc[(he1A.building_type==3)]
he31B = he1B.loc[(he1B.building_type==3)]
he32A = he2A.loc[(he2A.building_type==3)]
he32B = he2B.loc[(he2B.building_type==3)]
he33A = he3A.loc[(he3A.building_type==3)]
he33B = he3B.loc[(he3B.building_type==3)]
he34A = he4A.loc[(he4A.building_type==3)]
he34B = he4B.loc[(he4B.building_type==3)]
he34C = he4C.loc[(he4C.building_type==3)]
he35A = he5A.loc[(he5A.building_type==3)]
he35C = he5C.loc[(he5C.building_type==3)]
el40B = el0B.loc[(el0B.building_type==4)]
el41A = el1A.loc[(el1A.building_type==4)]
el41B = el1B.loc[(el1B.building_type==4)]
el42A = el2A.loc[(el2A.building_type==4)]
el42B = el2B.loc[(el2B.building_type==4)]
el43A = el3A.loc[(el3A.building_type==4)]
el43B = el3B.loc[(el3B.building_type==4)]
el44A = el4A.loc[(el4A.building_type==4)]
el44B = el4B.loc[(el4B.building_type==4)]
el44C = el4C.loc[(el4C.building_type==4)]
el45A = el5A.loc[(el5A.building_type==4)]
el45C = el5C.loc[(el5C.building_type==4)]
he40B = he0B.loc[(he0B.building_type==4)]
he41A = he1A.loc[(he1A.building_type==4)]
he41B = he1B.loc[(he1B.building_type==4)]
he42A = he2A.loc[(he2A.building_type==4)]
he42B = he2B.loc[(he2B.building_type==4)]
he43A = he3A.loc[(he3A.building_type==4)]
he43B = he3B.loc[(he3B.building_type==4)]
he44A = he4A.loc[(he4A.building_type==4)]
he44B = he4B.loc[(he4B.building_type==4)]
he44C = he4C.loc[(he4C.building_type==4)]
he45A = he5A.loc[(he5A.building_type==4)]
he45C = he5C.loc[(he5C.building_type==4)]
writer = pd.ExcelWriter("C:\\Users\\PowerMan\\Desktop\\KASR\\Data\\Pandas\\Eng_Analysed\\Climate_0B.xlsx")
el10B.to_excel(writer,'el10B')
el30B.to_excel(writer,'el30B')
el40B.to_excel(writer,'el40B')
he10B.to_excel(writer,'he10B')
he30B.to_excel(writer,'he30B')
he40B.to_excel(writer,'he40B')
writer.save()
writer = pd.ExcelWriter("C:\\Users\\PowerMan\\Desktop\\KASR\\Data\\Pandas\\Eng_Analysed\\Climate_1A.xlsx")
el11A.to_excel(writer,'el11A')
el31A.to_excel(writer,'el31A')
el41A.to_excel(writer,'el41A')
he11A.to_excel(writer,'he11A')
he31A.to_excel(writer,'he31A')
he41A.to_excel(writer,'he41A')
writer.save()
writer = pd.ExcelWriter("C:\\Users\\PowerMan\\Desktop\\KASR\\Data\\Pandas\\Eng_Analysed\\Climate_1B.xlsx")
el11B.to_excel(writer,'el11B')
el31B.to_excel(writer,'el31B')
el41B.to_excel(writer,'el41B')
he11B.to_excel(writer,'he11B')
he31B.to_excel(writer,'he31B')
he41B.to_excel(writer,'he41B')
writer.save()
writer = pd.ExcelWriter("C:\\Users\\PowerMan\\Desktop\\KASR\\Data\\Pandas\\Eng_Analysed\\Climate_2A.xlsx")
el12A.to_excel(writer,'el12A')
el32A.to_excel(writer,'el32A')
el42A.to_excel(writer,'el42A')
he12A.to_excel(writer,'he12A')
he32A.to_excel(writer,'he32A')
he42A.to_excel(writer,'he42A')
writer.save()
writer = pd.ExcelWriter("C:\\Users\\PowerMan\\Desktop\\KASR\\Data\\Pandas\\Eng_Analysed\\Climate_2B.xlsx")
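# Each climate above repeats the same six to_excel calls. A helper of this shape
# (illustrative only; the explicit blocks are kept as written) expresses the
# per-climate export once:
def export_climate(path, frames_by_sheet):
    """Write one Excel workbook with one sheet per dataframe."""
    writer = pd.ExcelWriter(path)
    for sheet_name, frame in frames_by_sheet.items():
        frame.to_excel(writer, sheet_name)
    writer.save()
# e.g. export_climate("...\\Climate_2B.xlsx",
#                     {'el12B': el12B, 'el32B': el32B, 'el42B': el42B,
#                      'he12B': he12B, 'he32B': he32B, 'he42B': he42B})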
import sys
import os
#handling the paths and the model
cwd = os.getcwd()
sys.path.append(cwd)
import pysd
from pathlib import Path
from pysd.py_backend.functions import Model
import matplotlib.pyplot as plt
import pandas as pd
import varcontrol
import time
start = time.time()
model = Model('corona_base_hackathon_treated.py')
path = Path.cwd()
out_path = path / 'output'
set_path = path / 'settings'
try:
file_lst = list(out_path.glob('*'))
for file in file_lst:
file.unlink()
except FileNotFoundError:
pass
out_path.mkdir(exist_ok=True)
#reading the settings
policy_df = pd.read_csv(set_path / 'policy.csv',index_col=0)
time_df = pd.read_csv(set_path / 'timesettings.csv',index_col=0)
init_df = pd.read_csv(set_path / 'initialconditions.csv',index_col=0)
model_df = pd.read_csv(set_path / 'modelsettings.csv', index_col=0)
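# Minimal sketch (an assumption, not part of the original script): run the loaded
# pysd model with its default settings and keep the resulting time series; the
# settings frames read above would normally override parameters for later runs.
baseline = model.run()  # pysd returns a pandas DataFrame indexed by time
baseline.to_csv(out_path / 'baseline_run.csv')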
# Imports
from snakemake.shell import shell
import pandas as pd
import os
# Wrapper info
wrapper_name = "salmon_count_merge"
wrapper_version = "0.0.2"
author = "<NAME>"
license = "MIT"
shell("echo 'Wrapper {wrapper_name} v{wrapper_version} / {author} / Licence {license}' > {snakemake.log}")
# Shortcuts
input_counts = snakemake.input.counts
output_counts = snakemake.output.get("counts", None)
output_tpm = snakemake.output.get("tpm", None)
df_counts = pd.DataFrame()
df_tpm = pd.DataFrame()
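# Sketch of the merge step (an assumption about how the wrapper continues): each
# salmon quant.sf file carries Name / TPM / NumReads columns, and one column per
# sample is collected into the count and TPM tables. The sample name is assumed
# to be the quant directory name.
for count_file in input_counts:
    sample = os.path.basename(os.path.dirname(count_file))
    quant = pd.read_csv(count_file, sep="\t", index_col="Name")
    df_counts[sample] = quant["NumReads"]
    df_tpm[sample] = quant["TPM"]
if output_counts:
    df_counts.to_csv(output_counts, sep="\t")
if output_tpm:
    df_tpm.to_csv(output_tpm, sep="\t")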
#import all the libraries
import io
import pandas as pd
import glob
from tabulate import tabulate
#function definations
############ Create a function which reads from a directory and return table with Mipox parameters and lot and wafer ID.
def read_excel_files(directory,sheetName,fileName):
all_data = pd.DataFrame()
data = pd.DataFrame()
directory = directory+ fileName +".xlsx"
for f in glob.glob(directory):
print(f)
#data = pd.read_excel(f, header=[2],sheet_name=sheetName)
data = pd.read_excel(f, header=[0], sheet_name=sheetName)
#print(tabulate(data))
        all_data = pd.concat([all_data, data], ignore_index=True)
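    # (the concatenated table is presumably what the function hands back)
    return all_data
# Example call (hypothetical directory, sheet and file-name pattern, shown for
# illustration only):
# mipox = read_excel_files("C:/data/mipox/", "Summary", "wafer_*")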
from http.client import responses
from statistics import mean
import IMLearn.learners.regressors.linear_regression
from IMLearn.learners.regressors import PolynomialFitting
from IMLearn.utils import split_train_test
import numpy as np
import pandas as pd
import plotly.express as px
import plotly.io as pio
pio.templates.default = "simple_white"
def load_data(filename: str) -> "tuple[pd.DataFrame, pd.Series]":
    """
    Load city daily temperature dataset and preprocess data.
    Parameters
    ----------
    filename: str
        Path to city temperature dataset
Returns
-------
Design matrix and response vector (Temp)
"""
df = pd.read_csv(filename , parse_dates =['Date'])
df.dropna(inplace=True)
df = df[df['Temp']>=-10]
responses = df['Temp']
df['DayOfYear'] = df['Date'].dt.dayofyear
df.drop(['Temp'], axis=1, inplace=True)
return (df,responses)
if __name__ == '__main__':
np.random.seed(0)
# Question 1 - Load and preprocessing of city temperature dataset
x, y = load_data('/home/ronyzerkavod/IML.HUJI/datasets/City_Temperature.csv')
# Question 2 - Exploring data for specific country
    df_curr = pd.concat([x, y], axis=1)
import dash
import dash_core_components as dcc
import dash_html_components as html
import plotly.graph_objs as go
import os
import numpy as np
import pandas as pd
pd.options.mode.chained_assignment = None # default='warn'
import scipy.spatial.distance as dist
import scipy.cluster.hierarchy as sch
# fn = os.path.join('static/gcorr/opt_corr.tsv.gz')
# fn = os.path.join('static/gcorr/traits.tsv.gz')
def initialize():
# Minimum z-score to display data for.
min_z = 3
# Minimum number of cases for binary phenotypes
min_cases = 250
# Height in px of plot
plot_height = 800
# Width in px of plot
plot_width = 1200
# Maximum number of phenotypes to display
max_phenos = 100
fn = os.path.join('/biobankengine/app/static/gcorr/opt_corr.tsv.gz')
data = pd.read_table(fn, index_col=0)
t = data.copy(deep=True)
t.index = ['_'.join(list(reversed(x.split('_')))) for x in t.index]
for columns in [['p1_code', 'p2_code'], ['tau1', 'tau2'], ['p1', 'p2'],
['p1_num_cases', 'p2_num_cases']]:
a = list(t[columns[0]])
b = list(t[columns[1]])
t.loc[:, columns[1]] = a
t.loc[:, columns[0]] = b
    data = pd.concat([data, t])
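    # The remainder of initialize() is not shown here. One way the imported scipy
    # modules could order phenotypes by similarity (an assumption, with hypothetical
    # column names, not the original code):
    # corr = data.pivot(index='p1_code', columns='p2_code', values='gcorr')
    # link = sch.linkage(dist.pdist(corr.fillna(0)), method='average')
    # order = sch.leaves_list(link)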
#!/usr/bin/python3
import pandas as pd
import subprocess
import os
import matplotlib.pyplot as plt
import numpy as np
import time
import glob
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
import numpy as np
import pandas as pd
import statsmodels.api as sm
from sklearn.preprocessing import OneHotEncoder
import statistics
import math
import sys
import itertools
import time
np.seterr(over='raise', under="ignore")
def batch_pp(df, covariates, batch_column, ignore):
"""This function takes in a df, the name of the covariate columns, and the batch column
and it outputs a feature count matrix, feature zero inflation matrix,
batch dummy matrix (one hot vectors as rows), covariate matrix (concatenated one hot vectors )
(covariates coefficient matrix [X_ij], batch dummy matrix [X_batch],
the zero inflation matrix [I_ijk], and count matrix [Y])
NOTE: this df can be a combination of datasets, or an individual dataset"""
# df: [dataframe] input with rows as samples and columns as feature counts.
# should only have OTU names ,covariates, and batch_column in keyspace
# covariates: [List] of the covariates to retain and estimate betas for
# batch_column: [string] column that defines the batches in this dataframe
# ignore: [List] of column names to ignore
################################### Check proper input ###################################
if (batch_column not in df.keys()):
raise ValueError("Column name " + str(batch_column) + " not found")
if (not set(covariates) <= set(df.keys())):
raise ValueError("Covariate columns not found in dataframe")
################################### Turn batch column to one hot vector ###################################
# note: for all features, batch matrix and covariate matrix will be the same.
X_batch = pd.get_dummies(df[batch_column], drop_first=False)
################################### Turn covariate columns covariate matrix ###################################
# number of columns is the number of betas to estimate
X_cov = pd.get_dummies(df[covariates], drop_first=True)
intercept = [1 for _ in range(X_cov.shape[0])]
# adding intercept term
X_cov.insert(0, "intercept", intercept)
################################### Build the feature zero inflation matrix ###################################
# turn numbers to 1 and keep zeroes the way they are
otu_keys = df.keys().drop(ignore)
I = df[otu_keys].replace('0.0', False).astype(bool).replace(False, 0).replace(True, 1)
df_dict = {"X_cov": X_cov,
"X_batch": X_batch,
"I": I,
"Y": df[otu_keys],
"ignore": df[ignore]}
return df_dict
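# Minimal usage sketch for batch_pp on a tiny synthetic table (illustrative only;
# real inputs are OTU count tables with covariate and batch columns):
# example_df = pd.DataFrame({
#     'otu1': [3, 0, 5, 2], 'otu2': [0, 1, 0, 4],
#     'age_group': ['young', 'old', 'young', 'old'],
#     'batch': ['b1', 'b1', 'b2', 'b2'],
# })
# mats = batch_pp(example_df, covariates=['age_group'], batch_column='batch',
#                 ignore=['age_group', 'batch'])
# mats['Y'], mats['I']   # feature counts and the matching zero-inflation indicator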
def reduce_batch_effects(Y, I, X_cov, X_batch, verbose=False):
"""This function takes in the output of batch_pp and does the feature-wise batch reduction"""
# INPUT:
# Y: matrix of feature counts with the columns as features and columns as sample counts as rows
# I: matrix of feature zero inflation (1s where values are >=1, 0s o.w.)
# X_cov: covariance matrix (this will give us the betas we need to estimate)
# X_batch: dummy matrix of batch values
# OUTPUT:
# corrected matrix
# merge the dummy variables for the covariates and also for the batch to get the whole design matrix
X_mat = pd.concat([X_cov, X_batch], axis=1).astype(float)
# type conversions and index storing
Y = Y.astype(float)
num_beta_cov = X_cov.shape[1]
num_beta_batch = X_batch.shape[1]
num_features = len(Y.keys())
num_samples = Y.shape[0]
Z = pd.DataFrame(index=Y.index, columns=Y.columns)
# for each of the features, we will calculate the batch reduction coefficients, then reduce the batch effects
count = 0
otu_names = list(Y.keys())
otu_names = [x for x in otu_names if Y[x][Y[x] > 0].count() > 2]
sigma_p_store = {}
beta_params_store = pd.DataFrame(columns=Y.columns, index=X_mat.columns)
    beta_cov_store = pd.DataFrame(columns=Y.columns, index=X_cov.columns)
import sys
import logging
import pandas as pd
import pathlib
import numpy as np
import math
from scipy.stats import rankdata
try:
# creating logger
logging.basicConfig(filename="101903643-logger-1.log",
format='%(asctime)s %(message)s',
filemode='w')
logger = logging.getLogger('101903643-1')
logger.setLevel(logging.DEBUG)
# for storing the number of arguments
n = len(sys.argv)
#print(n)
    # the script expects exactly 4 command-line arguments:
    # input file, weights, impacts, output file (5 including the script name)
    if n != 5:
        logger.error(
            "Wrong number of command-line arguments passed; expected exactly 4 (input file, weights, impacts, output file)")
        sys.exit("Error in the arguments passed, check log file")
name = sys.argv[1]
file = pathlib.Path(name)
#print(file)
if not file.is_file():
# Input file does not exists
logger.error(
"Error while reading the input file, file with the mentioned name does not exists")
sys.exit("Error while reading the input file, check log file")
# creating dataframe of inpu file
df = pd.read_csv(str(name))
#print('hi')
#print(df)
columns = df.columns
weights=sys.argv[2]
impact=sys.argv[3]
output_filename=sys.argv[4]
weights = list(weights.split(","))
weights=[float(i) for i in weights]
impact=list(impact.split(","))
#print(weights)
#print(mat)
def calculate(df,weights,impact):
row_count=df.shape[0]
col_count=df.shape[1]
if col_count < 3:
logger.error(
" less number of columns in input file,please add more columns")
sys.exit("Error in Input File")
if(len(weights)<col_count-1 and len(impact)<col_count-1):
logger.error(
" less number of weights & impacts passed in the command line arguments")
sys.exit("Error in command line arguments")
if(len(weights)>col_count-1 and len(impact)>col_count-1):
logger.error(
" More number of weights and impacts passed in the command line arguments ")
sys.exit("Error in command line arguments")
if(len(weights)<col_count-1):
logger.error(
"Less number of weights passed in the command line arguments")
sys.exit("Error in command line arguments")
if(len(weights)>col_count-1):
logger.error(
"More number of Weights passed in the command line arguments ")
sys.exit("Error in command line arguments")
if(len(impact)<col_count-1):
logger.error(
"Less Number of Impacts passed in the command line arguments ")
sys.exit("Error in command line arguments")
if(len(impact)>col_count-1):
logger.error(
"More Number of Impacts passed in the command line arguments ")
sys.exit("Error in command line arguments")
for i in impact:
if(i=='+' or i=='-'):
pass
else:
logger.error(
"Error in the impact argument passed during command line,please make sure the image type is '+' or '-' ")
sys.exit("Error in command line arguments")
normalized_df_denominator=[]
for i in range(1,col_count):
col_denominator=0
for j in range(row_count):
if isinstance(df.iloc[j][i],int) or isinstance(df.iloc[j][i],float):
col_denominator=col_denominator+float(df.iloc[j][i]**2)
else:
logger.error(
"Error in the input file,the elements are either of string type or char type")
sys.exit("Error in input file")
col_denominator=float(math.sqrt(col_denominator))
normalized_df_denominator.append(col_denominator)
for i in range(1,col_count):
for j in range(row_count):
if(float(normalized_df_denominator[i-1])==0.0):
logger.error(
"Dividing by zero while normalising")
sys.exit("Can not divide by zero")
a=df.iloc[j,i]/float(normalized_df_denominator[i-1])
df.iloc[j,i]=a
for i in range(1,col_count):
for j in range(row_count):
a=df.iloc[j,i]*weights[i-1]
df.iloc[j,i]=a
#calculating ideal best and worst
best=[]
worst=[]
for i in range(1,col_count):
if impact[i-1]=='+':
best.append(df.iloc[:,i].max())
worst.append(df.iloc[:,i].min())
else:
worst.append(df.iloc[:,i].max())
best.append(df.iloc[:,i].min())
performance=[]
for i in range(row_count):
sum_pos=sum((df.iloc[i,1:]-best[:])**2)
sum_neg=sum((df.iloc[i,1:]-worst[:])**2)
sum_pos=math.sqrt(sum_pos)
sum_neg=math.sqrt(sum_neg)
sums=sum_pos + sum_neg
p=sum_neg/sums
performance.append(p)
index=rankdata(np.array(performance))
        new_df = pd.DataFrame()
import numpy as np
import scipy as sp
import pandas as pd
# Path
data_path = '../Data/'
file_train = data_path + 'train.csv'
file_test = data_path + 'test.csv'
def sign_log1p_abs(x):
return np.sign(x) * np.log1p(np.abs(x))
# Parameters:
# df:2 or 3 columns DataFrame
# row: a column for row labels
# col: a column for column labels
# val: a column for values or None(values=1)
# Returns:
# mat: csr matrix The columns are sorted by their frequency(decending).
# label2row: a map from a row label to a row number of mat
# label2column: a map from a column label to a column number of mat
def DataFrame_tocsr(df, row, col, val=None, label2row=None, label2col=None,
return_tbl=False, min_count=1):
if label2row is None:
row_labels = df[row].dropna().unique() # pd.Series.unique does not sort
label2row = pd.Series(range(row_labels.size), index=row_labels)
if val is None:
df = df[[row, col]].dropna()
vals = pd.Series(np.ones(df.shape[0]))
else:
df = df[[row, col, val]].dropna()
vals = df[val].values
if label2col is None:
col_label_cnt = df[col].value_counts()
if min_count > 1:
col_label_cnt = col_label_cnt[col_label_cnt >= min_count]
col_labels = col_label_cnt.index
label2col = pd.Series(range(col_labels.size), index=col_labels)
rows = df[row].map(label2row)
cols = df[col].map(label2col)
if cols.size == 0:
return False
mat = sp.sparse.coo_matrix((vals, (rows, cols)), shape=(label2row.size, label2col.size)).tocsr()
if return_tbl:
return mat, label2row, label2col
else:
return mat
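# Small illustrative example (not part of the original module): build a user x item
# indicator matrix from a long-format frame.
# interactions = pd.DataFrame({'user': ['u1', 'u1', 'u2'], 'item': ['a', 'b', 'a']})
# mat, user2row, item2col = DataFrame_tocsr(interactions, 'user', 'item', return_tbl=True)
# mat.toarray()   # 2 x 2 binary matrix; columns ordered by item frequency ('a' first)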
def feature_extraction(training=None, test=None, useUpc=False):
if training is None and test is None:
        training = pd.read_csv(file_train)
"""
Queries to retrieve data from database
"""
import pandas as pd
from .utils import convert_lat, convert_lon, rename_categories
DATE_FORMAT = '%m/%d/%y %H:%M:%S'
TIME_FORMAT = '%H%M'
FAR_PARTS = "'121 ', '125 '"
EVENTS_COLUMNS = (
'ev_id',
'ntsb_no',
'ev_type',
'ev_date',
'ev_time',
'ev_tmzn',
'ev_city',
'ev_state',
'ev_country',
'ev_site_zipcode',
'mid_air',
'on_ground_collision',
'latitude',
'longitude',
'latlong_acq',
'apt_name',
'ev_nr_apt_id',
'ev_nr_apt_loc',
'apt_dist',
'apt_dir',
'apt_elev',
'wx_brief_comp',
'wx_src_iic',
'wx_obs_time',
'wx_obs_dir',
'wx_obs_fac_id',
'wx_obs_elev',
'wx_obs_dist',
'wx_obs_tmzn',
'light_cond',
'sky_cond_nonceil',
'sky_nonceil_ht',
'sky_ceil_ht',
'sky_cond_ceil',
'vis_rvr',
'vis_rvv',
'vis_sm',
'wx_temp',
'wx_dew_pt',
'wind_dir_deg',
'wind_dir_ind',
'wind_vel_kts',
'wind_vel_ind',
'gust_ind',
'gust_kts',
'altimeter',
'wx_dens_alt',
'wx_int_precip',
'metar',
'ev_highest_injury',
'inj_f_grnd',
'inj_m_grnd',
'inj_s_grnd',
'inj_tot_f',
'inj_tot_m',
'inj_tot_n',
'inj_tot_s',
'inj_tot_t',
'invest_agy',
'ntsb_docket',
'ntsb_notf_from',
'ntsb_notf_date',
'ntsb_notf_tm',
'fiche_number',
'wx_cond_basic',
'faa_dist_office'
)
EVENTS_NUMERIC = (
"apt_elev",
"apt_dir",
"gust_kts",
"inj_f_grnd",
"inj_m_grnd",
"inj_s_grnd",
"inj_tot_f",
"inj_tot_m",
"inj_tot_n",
"inj_tot_s",
"inj_tot_t",
"ntsb_notf_tm",
"vis_rvv",
"wind_dir_deg",
"wx_obs_dist",
"wx_obs_time",
"wx_dew_pt",
"wx_obs_dir",
"wx_temp",
"altimeter",
"apt_dist",
"vis_rvr",
"vis_sm",
"ntsb_docket",
"sky_ceil_ht",
"sky_nonceil_ht",
"wx_obs_elev",
"wx_dens_alt"
)
EVENTS_CATEGORICAL = (
"ev_highest_injury",
"ev_nr_apt_loc",
"ev_state",
"ev_tmzn",
"ev_type",
"gust_ind",
"invest_agy",
"latlong_acq",
"light_cond",
"mid_air",
"on_ground_collision",
"sky_cond_ceil",
"sky_cond_nonceil",
"wind_dir_ind",
"wind_vel_ind",
"wx_brief_comp",
"wx_cond_basic",
"wx_int_precip",
"wx_src_iic"
)
AIRCRAFT_COLUMNS = (
"ev_id",
"Aircraft_Key",
"regis_no",
"ntsb_no",
"acft_missing",
"far_part",
"flt_plan_filed",
"flight_plan_activated",
"damage",
"acft_fire",
"acft_expl",
"acft_make",
"acft_model",
"acft_series",
"acft_serial_no",
"cert_max_gr_wt",
"acft_category",
"acft_reg_cls",
"homebuilt",
"date_last_insp",
"afm_hrs",
"afm_hrs_last_insp",
"commercial_space_flight",
"unmanned",
"ifr_equipped_cert",
"elt_mounted_aircraft",
"elt_connected_antenna",
"afm_hrs_since",
"air_medical",
"certs_held",
"dest_apt_id",
"dest_country",
"dest_same_local",
"dest_state",
"dprt_apt_id",
"dprt_country",
"dprt_pt_same_ev",
"dprt_state",
"dprt_timezn",
"elt_aided_loc_ev",
"elt_install",
"elt_oper",
"elt_type",
"evacuation",
"fixed_retractable",
"oper_addr_same",
"oper_cert",
"oper_code",
"oper_country",
"oper_dom_int",
"oper_individual_name",
"oper_pax_cargo",
"oper_same",
"oper_sched",
"oper_state",
"oprtng_cert",
"owner_country",
"owner_state",
"report_to_icao",
"second_pilot",
"site_seeing",
"type_fly",
"type_last_insp",
"dest_city",
"dprt_city",
"med_type_flight",
"oper_cert_num",
"oper_city",
"oper_dba",
"oper_name",
"oper_street",
"oper_zip",
"owner_acft",
"owner_city",
"owner_street",
"owner_zip",
"rwy_num",
"fuel_on_board",
"elt_manufacturer",
"elt_model",
"elt_reason_other",
"cc_seats",
"fc_seats",
"pax_seats",
"phase_flt_spec",
"rwy_len",
"rwy_width",
"acft_year",
"dprt_time",
"total_seats",
"num_eng"
)
AIRCRAFT_CATEGORICAL = (
"commercial_space_flight",
"unmanned",
"ifr_equipped_cert",
"elt_mounted_aircraft",
"elt_connected_antenna",
"acft_category",
"acft_expl",
"acft_fire",
"acft_missing",
"flt_plan_filed",
"homebuilt",
"afm_hrs_since",
"air_medical",
"certs_held",
"damage",
"dest_apt_id",
"dest_country",
"dest_same_local",
"dest_state",
"dprt_apt_id",
"dprt_country",
"dprt_pt_same_ev",
"dprt_state",
"dprt_timezn",
"elt_aided_loc_ev",
"elt_install",
"elt_oper",
"elt_type",
"evacuation",
"far_part",
"fixed_retractable",
"flight_plan_activated",
"oper_addr_same",
"oper_cert",
"oper_code",
"oper_country",
"oper_dom_int",
"oper_individual_name",
"oper_pax_cargo",
"oper_same",
"oper_sched",
"oper_state",
"oprtng_cert",
"owner_country",
"owner_state",
"report_to_icao",
"second_pilot",
"site_seeing",
"type_fly",
"type_last_insp",
"acft_make",
"acft_model",
"acft_series",
"dest_city",
"dprt_city",
"med_type_flight",
"oper_cert_num",
"oper_city",
"oper_dba",
"oper_name",
"oper_street",
"oper_zip",
"owner_acft",
"owner_city",
"owner_street",
"owner_zip",
"rwy_num",
"fuel_on_board",
"elt_manufacturer",
"elt_model",
"elt_reason_other"
)
AIRCRAFT_NUMERIC = (
"Aircraft_Key",
"cc_seats",
"cert_max_gr_wt",
"fc_seats",
"pax_seats",
"rwy_len",
"rwy_width",
"acft_year",
"afm_hrs",
"afm_hrs_last_insp",
"dprt_time",
"total_seats",
"num_eng",
"phase_flt_spec",
)
OCCURRENCES_COLUMNS = (
"ev_id",
"Aircraft_Key",
"Occurrence_No",
"Occurrence_Code",
"Phase_of_Flight",
"Altitude"
)
OCCURENCES_NUMERIC = (
"Aircraft_Key",
"Occurrence_No",
"Altitude",
"Phase_of_Flight"
)
OCCURRENCE_CATEGORICAL = (
"Occurrence_Code",
"Phase_of_Flight",
)
FLIGHT_TIME_COLS = (
"ev_id",
"Aircraft_Key",
"crew_no",
"flight_type",
"flight_craft",
"flight_hours"
)
FLIGHT_TIME_NUMERIC = (
"Aircraft_Key",
"crew_no",
"flight_hours"
)
FLIGHT_TIME_CATEGORICAL = (
"flight_type",
"flight_craft"
)
SEQ_OF_EVETNS_COLUMNS = (
"ev_id",
"Aircraft_Key",
"Occurrence_No",
"seq_event_no",
"group_code",
"Subj_Code",
"Cause_Factor",
"Modifier_Code",
"Person_Code",
)
SEQ_OF_EVENTS_NUMERIC = (
"Aircraft_Key",
"Occurrence_No",
"seq_event_no"
)
SEQ_OF_EVENTS_CATEGORICAL = (
"group_code",
"Subj_Code",
"Cause_Factor",
"Modifier_Code",
"Person_Code",
)
FLIGHT_CREW_COLS = (
'ev_id',
'Aircraft_Key',
'crew_no',
'crew_category',
'crew_age',
'crew_sex'
)
FLIGHT_CREW_NUMERIC = (
'crew_no',
'crew_age'
)
FLIGHT_CREW_CATEGORICAL = (
'crew_category',
'crew_sex'
)
def get_codes_meaning(con, table, column):
query = (
"select distinct code_iaids, meaning from eADMSPUB_DataDictionary "
f"where \"Table\"='{table}' and \"Column\"='{column}'"
)
return pd.read_sql(query, con, index_col='code_iaids')
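# Example usage (assumes a SQLite export of the NTSB aviation database; the file
# name is illustrative):
# import sqlite3
# con = sqlite3.connect('avall.db')
# phase_codes = get_codes_meaning(con, 'aircraft', 'phase_flt_spec')
# phase_codes.head()   # code -> meaning, e.g. 570 -> Landing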
def get_events_accidents(con):
query = ("SELECT {cols} FROM events "
"WHERE ev_type='ACC' AND ev_date IS NOT NULL "
"AND ev_id IN (SELECT ev_id FROM aircraft WHERE "
"far_part IN ({far_parts}))".format(
cols=", ".join(EVENTS_COLUMNS),
far_parts=FAR_PARTS
)
)
events = pd.read_sql_query(query, con,
index_col='ev_id',
parse_dates={'ev_date': DATE_FORMAT,
'ev_time': TIME_FORMAT,
'ntsb_notf_date': DATE_FORMAT,
'ntsb_notf_tm': TIME_FORMAT
}
)
for c in EVENTS_NUMERIC:
events[c] = pd.to_numeric(events[c], errors='coerce')
for c in EVENTS_CATEGORICAL:
events[c] = events[c].astype('category')
events['latitude'] = events['latitude'].apply(convert_lat)
events['longitude'] = events['longitude'].apply(convert_lon)
return events
def get_events_all(con):
query = ("SELECT {cols} FROM events "
.format(
cols=", ".join(EVENTS_COLUMNS)
)
)
events = pd.read_sql_query(query, con,
index_col='ev_id',
parse_dates={'ev_date': DATE_FORMAT,
'ev_time': TIME_FORMAT,
'ntsb_notf_date': DATE_FORMAT,
'ntsb_notf_tm': TIME_FORMAT
}
)
for c in EVENTS_NUMERIC:
events[c] = pd.to_numeric(events[c], errors='coerce')
for c in EVENTS_CATEGORICAL:
events[c] = events[c].astype('category')
events['latitude'] = events['latitude'].apply(convert_lat)
events['longitude'] = events['longitude'].apply(convert_lon)
return events
def get_aircrafts_accidents(con):
ac_columns = ", ".join(AIRCRAFT_COLUMNS)
query = (
f"SELECT {ac_columns} FROM aircraft WHERE ev_id IN "
"(SELECT ev_id FROM events WHERE ev_type='ACC' AND "
f"ev_date IS NOT NULL) AND far_part IN ({FAR_PARTS})"
)
aircrafts = pd.read_sql(query, con,
parse_dates={'date_last_insp': DATE_FORMAT}
)
for c in AIRCRAFT_NUMERIC:
aircrafts[c] = pd.to_numeric(aircrafts[c], errors='coerce')
# phase_flt_spec is parsed as numeric and this is used to get phases of
# flight with less detail (ie. "Takeoff - roll/run" -> "Takeoff)
aircrafts['phase_flt_spec_gross'] = ((aircrafts.phase_flt_spec // 10) * 10)
new_categorical_cols = ['phase_flt_spec_gross', 'phase_flt_spec']
for c in list(AIRCRAFT_CATEGORICAL) + new_categorical_cols:
aircrafts[c] = aircrafts[c].astype('category')
PHASE_FLT_SPEC_DICT = get_codes_meaning(con, 'aircraft', 'phase_flt_spec')
# Change codes for names (ie. 570 to Landing)
cats = rename_categories(aircrafts['phase_flt_spec_gross'].cat.categories,
PHASE_FLT_SPEC_DICT)
aircrafts['phase_flt_spec_gross'].cat.rename_categories(cats, inplace=True)
cats = rename_categories(aircrafts['phase_flt_spec'].cat.categories,
PHASE_FLT_SPEC_DICT)
aircrafts['phase_flt_spec'].cat.rename_categories(cats, inplace=True)
return aircrafts
def get_aircrafts_all(con):
ac_columns = ", ".join(AIRCRAFT_COLUMNS)
query = (
f"SELECT {ac_columns} FROM aircraft "
)
aircrafts = pd.read_sql(query, con,
parse_dates={'date_last_insp': DATE_FORMAT}
)
for c in AIRCRAFT_NUMERIC:
aircrafts[c] = pd.to_numeric(aircrafts[c], errors='coerce')
# phase_flt_spec is parsed as numeric and this is used to get phases of
# flight with less detail (ie. "Takeoff - roll/run" -> "Takeoff)
aircrafts['phase_flt_spec_gross'] = ((aircrafts.phase_flt_spec // 10) * 10)
new_categorical_cols = ['phase_flt_spec_gross', 'phase_flt_spec']
for c in list(AIRCRAFT_CATEGORICAL) + new_categorical_cols:
aircrafts[c] = aircrafts[c].astype('category')
PHASE_FLT_SPEC_DICT = get_codes_meaning(con, 'aircraft', 'phase_flt_spec')
# Change codes for names (ie. 570 to Landing)
cats = rename_categories(aircrafts['phase_flt_spec_gross'].cat.categories,
PHASE_FLT_SPEC_DICT)
aircrafts['phase_flt_spec_gross'].cat.rename_categories(cats, inplace=True)
cats = rename_categories(aircrafts['phase_flt_spec'].cat.categories,
PHASE_FLT_SPEC_DICT)
aircrafts['phase_flt_spec'].cat.rename_categories(cats, inplace=True)
return aircrafts
def get_occurrences_accidents(con):
occurrence_cols = ", ".join(OCCURRENCES_COLUMNS)
query = (
f"SELECT {occurrence_cols} FROM Occurrences WHERE ev_id IN "
"(SELECT ev_id FROM events WHERE ev_type='ACC' AND "
"ev_date IS NOT NULL) AND ev_id IN (SELECT ev_id FROM aircraft "
f"WHERE far_part in ({FAR_PARTS}))"
)
occurrences = pd.read_sql(query, con)
for c in OCCURENCES_NUMERIC:
        occurrences[c] = pd.to_numeric(occurrences[c], errors='coerce')
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import contextlib
import argparse
import time
import os
import numpy as np
import pandas as pd
import mindspore
from mindspore import Model, context, load_checkpoint, load_param_into_net, ops, Tensor
from third_party import arithmetic_coding_base
from process_data import feature_extraction, feature_arrange
from src.dataset import normalization
from src.tools.utils import bin_loader, write_ply, chamfer_distance
from src.tools.octree_base import Octree, deserialize_depth_first
import src.network as network
def def_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('--test_dataset', '-t', type=str, default='/home/OctSqueeze/test_dataset/')
parser.add_argument('--compression', '-c', type=str, default='/home/OctSqueeze/experiment/compression/')
parser.add_argument('--recon', '-r', type=str, default='/home/OctSqueeze/experiment/recon/')
parser.add_argument(
'--model', '-m', type=str, default='/home/OctSqueeze/checkpoint/octsqueeze.ckpt', help='route of model')
parser.add_argument('--device_target', type=str, default="Ascend", choices=['Ascend', 'GPU', 'CPU'],
help='device where the code will be implemented')
return parser.parse_args()
def compression_decompression_simulation(dataset_path, precision_oct):
# read test data names
frames = []
for filename in os.listdir(dataset_path):
if filename.endswith('.bin'):
frames.append('{}'.format(filename))
# Load network
# Configure operation information
context.set_context(mode=context.GRAPH_MODE, device_target=args.device_target, device_id=0)
## Create networks
net = network.OctSqueezeNet()
param_dict = load_checkpoint(args.model)
load_param_into_net(net, param_dict)
bpip_CD = np.empty([len(frames), 2])
time_sum = 0
frame_num = len(frames)
metrics = {
}
predict_net = Model(net, metrics=metrics)
for frame_idx, frame in enumerate(frames):
data_route = os.path.join(dataset_path, frame)
pcd_example = bin_loader(data_route)
points_num = pcd_example.shape[0]
max_range = (2 ** 13 - 1) * 0.01
tree = Octree(max_range=max_range, precision=precision_oct)
for j in range(pcd_example.shape[0]):
tree.insert_node(tree, tree.size, None, pcd_example[j], 0)
feature_branch = feature_extraction(tree)
feature_branch = np.array(feature_branch)
nodes = feature_arrange(feature_branch, points_num)
start = time.time()
cur_node = normalization(nodes['cur_node'].astype(np.float32))
parent1 = normalization(nodes['parent1'].astype(np.float32))
parent2 = normalization(nodes['parent2'].astype(np.float32))
parent3 = normalization(nodes['parent3'].astype(np.float32))
feature = Tensor(np.concatenate([cur_node, parent1, parent2, parent3], axis=1))
output = predict_net.predict(feature)
output.set_dtype(mindspore.float32)
softmax = ops.Softmax()
output = softmax(output)
output = output.asnumpy()
end = time.time()
time_cost = end - start
time_sum = time_sum + time_cost
# Write compressed file
f_frame = open(os.path.join(args.compression, frame), "wb")
with contextlib.closing(arithmetic_coding_base.BitOutputStream(f_frame)) as bitout:
enc = arithmetic_coding_base.ArithmeticEncoder(32, bitout)
for node_idx in range(output.shape[0]):
gt_occupancy = int(nodes['gt'][node_idx])
distribution = output[node_idx]
distribution_value = np.floor(distribution * 10000)
label = np.ones(257)
label[:256] += distribution_value
frequencies = arithmetic_coding_base.SimpleFrequencyTable(label.astype(int))
enc.write(frequencies, gt_occupancy)
enc.finish()
f_frame.close()
file_size = os.path.getsize(os.path.join(args.compression, frame)) * 8
# occupancy stream in the compressed binary file
occupancy_stream = nodes['gt'].astype(np.int)
recon_tree = Octree(max_range=max_range, precision=precision_oct)
recon_tree, recon_points = deserialize_depth_first(iter(occupancy_stream), recon_tree.max_depth, recon_tree)
recon_points = np.array(recon_points).astype(np.float32)
        df = pd.DataFrame(recon_points[:, :3], columns=['x', 'y', 'z'])
import nlu
from nlu.discovery import Discoverer
from nlu.pipe.utils.storage_ref_utils import StorageRefUtils
from typing import List, Tuple, Optional, Dict, Union
import streamlit as st
from nlu.utils.modelhub.modelhub_utils import ModelHubUtils
import numpy as np
import pandas as pd
from nlu.pipe.viz.streamlit_viz.streamlit_utils_OS import StreamlitUtilsOS
from nlu.pipe.viz.streamlit_viz.gen_streamlit_code import get_code_for_viz
from nlu.pipe.viz.streamlit_viz.styles import _set_block_container_style
import random
from nlu.pipe.viz.streamlit_viz.streamlit_viz_tracker import StreamlitVizTracker
class WordSimilarityStreamlitBlock():
@staticmethod
def display_word_similarity(
pipe, # nlu pipe
default_texts: Tuple[str, str] = ("<NAME> likes to party!", "<NAME> likes to party!"),
threshold: float = 0.5,
title: Optional[str] = "Embeddings Similarity Matrix & Visualizations ",
sub_tile: Optional[
str] = "Visualize `word-wise similarity matrix` and calculate `similarity scores` for `2 texts` and every `word embedding` loaded",
write_raw_pandas: bool = False,
display_embed_information: bool = True,
similarity_matrix=True,
show_algo_select: bool = True,
dist_metrics: List[str] = ('cosine'),
set_wide_layout_CSS: bool = True,
generate_code_sample: bool = False,
key: str = "NLU_streamlit",
num_cols: int = 2,
display_scalar_similarities: bool = False,
display_similarity_summary: bool = False,
model_select_position: str = 'side', # main or side
show_infos: bool = True,
show_logo: bool = True,
):
"""We visualize the following cases :
1. Simmilarity between 2 words - > sim (word_emb1, word_emb2)
2. Simmilarity between 2 sentences -> let weTW stand word word_emb of token T and sentence S
2.1. Raw token level with merged embeddings -> sim([we11,we21,weT1], [we12,we22,weT2])
2.2 Autogenerate sentemb, basically does 2.1 in the Spark NLP backend
2.3 Already using sentence_embedder model -> sim(se1,se2)
3. Simmilarity between token and sentence -> sim([we11,w21,wT1], se2)
4. Mirrored 3
"""
# https://scikit-learn.org/stable/modules/classes.html#module-sklearn.metrics.pairwise
StreamlitVizTracker.footer_displayed = False
try:
import plotly.express as px
from sklearn.metrics.pairwise import distance_metrics
except:
st.error(
"You need the sklearn and plotly package in your Python environment installed for similarity visualizations. Run <pip install sklearn plotly>")
if set_wide_layout_CSS: _set_block_container_style()
if title: st.header(title)
if show_logo: StreamlitVizTracker.show_logo()
if sub_tile: st.subheader(sub_tile)
StreamlitVizTracker.loaded_word_embeding_pipes = []
dist_metric_algos = distance_metrics()
dist_algos = list(dist_metric_algos.keys())
if 'haversine' in dist_algos: dist_algos.remove('haversine') # not applicable in >2D
if 'precomputed' in dist_algos: dist_algos.remove('precomputed') # Not a dist
cols = st.beta_columns(2)
text1 = cols[0].text_input("Text or word1", default_texts[0], key=key+'field_1')
text2 = cols[1].text_input("Text or word2", default_texts[1], key=key+'field_2') if len(default_texts) > 1 else cols[
1].text_input("Text or word2", 'Please enter second string', key=key)
# exp = st.sidebar.beta_expander("Select additional Embedding Models and distance metric to compare ")
e_coms = StreamlitUtilsOS.find_all_embed_components(pipe)
embed_algos_to_load = []
embed_pipes = [pipe]
dist_algo_selection = dist_metrics
if show_algo_select:
# emb_components_usable = Discoverer.get_components('embed')
emb_components_usable = [e for e in Discoverer.get_components('embed', True, include_aliases=True) if
'chunk' not in e and 'sentence' not in e]
loaded_embed_nlu_refs = []
loaded_storage_refs = []
loaded_embed_nlu_refs = list(set(loaded_embed_nlu_refs))
for c in e_coms:
if not hasattr(c.info, 'nlu_ref'): continue
r = c.info.nlu_ref
if 'en.' not in r and 'embed.' not in r and 'ner' not in r:
loaded_embed_nlu_refs.append('en.embed.' + r)
elif 'en.' in r and 'embed.' not in r and 'ner' not in r:
r = r.split('en.')[0]
loaded_embed_nlu_refs.append('en.embed.' + r)
else:
loaded_embed_nlu_refs.append(StorageRefUtils.extract_storage_ref(c))
loaded_storage_refs.append(StorageRefUtils.extract_storage_ref(c))
for p in StreamlitVizTracker.loaded_word_embeding_pipes:
if p != pipe: loaded_embed_nlu_refs.append(p.nlu_ref)
for l in loaded_embed_nlu_refs:
if l not in emb_components_usable: emb_components_usable.append(l)
# embed_algo_selection = exp.multiselect("Click to pick additional Embedding Algorithm",options=emb_components_usable,default=loaded_embed_nlu_refs,key = key)
# dist_algo_selection = exp.multiselect("Click to pick additional Distance Metric", options=dist_algos, default=dist_metrics, key = key)
emb_components_usable.sort()
loaded_embed_nlu_refs.sort()
dist_algos.sort()
if model_select_position == 'side':
embed_algo_selection = st.sidebar.multiselect(
"Pick additional Word Embeddings for the Similarity Matrix", options=emb_components_usable,
default=loaded_embed_nlu_refs, key=key)
dist_algo_selection = st.sidebar.multiselect("Pick additional Similarity Metrics ", options=dist_algos,
default=dist_metrics, key=key)
else:
exp = st.beta_expander("Pick additional Word Embeddings and Similarity Metrics")
embed_algo_selection = exp.multiselect("Pick additional Word Embeddings for the Similarity Matrix",
options=emb_components_usable, default=loaded_embed_nlu_refs,
key=key)
dist_algo_selection = exp.multiselect("Pick additional Similarity Metrics ", options=dist_algos,
default=dist_metrics, key=key)
embed_algos_to_load = list(set(embed_algo_selection) - set(loaded_embed_nlu_refs))
for embedder in embed_algos_to_load: embed_pipes.append(nlu.load(embedder))
if generate_code_sample: st.code(
get_code_for_viz('SIMILARITY', [StreamlitUtilsOS.extract_name(p) for p in embed_pipes], default_texts))
StreamlitVizTracker.loaded_word_embeding_pipes += embed_pipes
similarity_metrics = {}
embed_vector_info = {}
cols_full = True
col_index = 0
# for p in embed_pipes :
for p in StreamlitVizTracker.loaded_word_embeding_pipes:
data1 = p.predict(text1, output_level='token', get_embeddings=True).dropna()
data2 = p.predict(text2, output_level='token', get_embeddings=True).dropna()
e_coms = StreamlitUtilsOS.find_all_embed_components(p)
modelhub_links = [ModelHubUtils.get_url_by_nlu_refrence(c.info.nlu_ref) if hasattr(c.info,
'nlu_ref') else ModelHubUtils.get_url_by_nlu_refrence(
'') for c in e_coms]
e_cols = StreamlitUtilsOS.get_embed_cols(p)
for num_emb, e_col in enumerate(e_cols):
if col_index == num_cols - 1: cols_full = True
if cols_full:
cols = st.beta_columns(num_cols)
col_index = 0
cols_full = False
else:
col_index += 1
tok1 = data1['token']
tok2 = data2['token']
emb1 = data1[e_col]
emb2 = data2[e_col]
def normalize_matrix(m):
return np.nan_to_num(m / np.linalg.norm(m, axis=1, keepdims=True))
embed_mat1 = normalize_matrix(np.array([x for x in emb1]))
embed_mat2 = normalize_matrix(np.array([x for x in emb2]))
# e_name = e_col.split('word_embedding_')[-1]
e_name = e_coms[num_emb].info.nlu_ref if hasattr(e_coms[num_emb].info, 'nlu_ref') else \
e_col.split('word_embedding_')[-1] if 'en.' in e_col else e_col
e_name = e_name.split('embed.')[-1] if 'en.' in e_name else e_name
if 'ner' in e_name: e_name = loaded_storage_refs[num_emb]
embed_vector_info[e_name] = {"Vector Dimension ": embed_mat1.shape[1],
"Num Vectors": embed_mat1.shape[0] + embed_mat1.shape[0],
"NLU_reference": e_coms[num_emb].info.nlu_ref if hasattr(
e_coms[num_emb].info, 'nlu_ref') else ' ',
"Spark_NLP_reference": ModelHubUtils.NLU_ref_to_NLP_ref(
e_coms[num_emb].info.nlu_ref if hasattr(e_coms[num_emb].info,
'nlu_ref') else ' '),
"Storage Reference": loaded_storage_refs[num_emb],
'Modelhub info': modelhub_links[num_emb]}
for dist_algo in dist_algo_selection:
# scalar_similarities[e_col][dist_algo]={}
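                # (dist - 1) * -1 below is simply 1 - dist: it turns the pairwise
                # distance matrix into a similarity matrix (for cosine distance this
                # recovers cosine similarity).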
sim_score = ((dist_metric_algos[dist_algo](embed_mat1, embed_mat2) - 1) * -1)
sim_score = | pd.DataFrame(sim_score) | pandas.DataFrame |
from sklearn.preprocessing import MinMaxScaler
import pandas as pd
data = pd.read_csv("B05_discharge_soh.csv")
yenisatirvoltage= pd.DataFrame()
yenisatircurrent= pd.DataFrame()
yenisatirtemperature= pd.DataFrame()
yenisatirsoh= pd.DataFrame()
birlestirilendizi=pd.DataFrame(columns=['v1','v2','v3','v4','v5','v6','v7','v8','v9','v10',
'c1','c2','c3','c4','c5','c6','c7','c8','c9','c10',
't1','t2','t3','t4','t5','t6','t7','t8','t9','t10','soh1'])
scaler = MinMaxScaler(feature_range=(0, 1))
scaler2 = MinMaxScaler(feature_range=(0, 1))
scaler3 = MinMaxScaler(feature_range=(0, 1))
volt=pd.DataFrame()
curr=pd.DataFrame()
temp=pd.DataFrame()
cycle=data.iloc[:1,7:8]
soh=data.iloc[:1,8:9]
cycledegeri=int(cycle.loc[0])
cyclesatiradet=0
eskicyclesatiradet=0
j=0
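# The loop below walks through the measurements one discharge cycle at a time: it
# counts how many rows belong to the current cycle (cyclesatiradet), reads that
# cycle's SOH label and min-max scales the voltage, current and temperature columns
# of the block -- presumably so they can later be windowed into the 10-sample
# v1..v10 / c1..c10 / t1..t10 feature rows declared in birlestirilendizi above
# (that last step is an assumption based on those column names).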
while j<50285:
for k in range(0,500):
konum=data.iloc[j+k:j+k+1,7:8]
if(j+k<50285):
if(cycledegeri==int(konum.loc[j+k])):
continue
else:
cycledegeri=int(konum.loc[j+k])
cyclesatiradet=k+1
break
else:
cyclesatiradet=k
break
soh=data.iloc[j:j+1,8:9]
data1=data.iloc[j:j+cyclesatiradet+1,0:3]
data2=data.iloc[j:j+cyclesatiradet+1,7:9]
scaledvoltage = scaler.fit_transform(data1.iloc[:,0:1])
scaledcurrent = scaler2.fit_transform(data1.iloc[:,1:2])
scaledtemp = scaler3.fit_transform(data1.iloc[:,2:3])
volt=pd.DataFrame(scaledvoltage)
curr= | pd.DataFrame(scaledcurrent) | pandas.DataFrame |
# importing all the required libraries
import numpy as np
import pandas as pd
from datetime import datetime
import time, datetime
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import StandardScaler, LabelEncoder, MinMaxScaler
from chart_studio.plotly import plotly
import plotly.offline as offline
import plotly.graph_objs as go
offline.init_notebook_mode()
from collections import Counter
import pickle
from sklearn.model_selection import train_test_split
import lightgbm as lgb
import math
from tqdm import tqdm
# Reading all the files
air_visit_data = pd.read_csv('air_visit_data.csv')
air_store_info = pd.read_csv('air_store_info.csv')
air_reserve = pd.read_csv('air_reserve.csv')
hpg_store_info = | pd.read_csv('hpg_store_info.csv') | pandas.read_csv |
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from pandas import (Index, Series, DataFrame, date_range)
from pandas.core.index import MultiIndex
from pandas.compat import StringIO, lrange, range, u
from pandas import compat
import pandas.util.testing as tm
from .common import TestData
class TestSeriesRepr(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_multilevel_name_print(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'], ['one', 'two',
'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
s = Series(lrange(0, len(index)), index=index, name='sth')
expected = ["first second", "foo one 0",
" two 1", " three 2",
"bar one 3", " two 4",
"baz two 5", " three 6",
"qux one 7", " two 8",
" three 9", "Name: sth, dtype: int64"]
expected = "\n".join(expected)
self.assertEqual(repr(s), expected)
def test_name_printing(self):
# test small series
s = Series([0, 1, 2])
s.name = "test"
self.assertIn("Name: test", repr(s))
s.name = None
self.assertNotIn("Name:", repr(s))
# test big series (diff code path)
s = Series(lrange(0, 1000))
s.name = "test"
self.assertIn("Name: test", repr(s))
s.name = None
self.assertNotIn("Name:", repr(s))
s = Series(index=date_range('20010101', '20020101'), name='test')
self.assertIn("Name: test", repr(s))
def test_repr(self):
str(self.ts)
str(self.series)
str(self.series.astype(int))
str(self.objSeries)
str(Series(tm.randn(1000), index=np.arange(1000)))
str(Series(tm.randn(1000), index=np.arange(1000, 0, step=-1)))
# empty
str(self.empty)
# with NaNs
self.series[5:7] = np.NaN
str(self.series)
# with Nones
ots = self.ts.astype('O')
ots[::2] = None
repr(ots)
# various names
for name in ['', 1, 1.2, 'foo', u('\u03B1\u03B2\u03B3'),
'loooooooooooooooooooooooooooooooooooooooooooooooooooong',
('foo', 'bar', 'baz'), (1, 2), ('foo', 1, 2.3),
(u('\u03B1'), u('\u03B2'), u('\u03B3')),
(u('\u03B1'), 'bar')]:
self.series.name = name
repr(self.series)
biggie = Series(tm.randn(1000), index=np.arange(1000),
name=('foo', 'bar', 'baz'))
repr(biggie)
# 0 as name
ser = Series(np.random.randn(100), name=0)
rep_str = repr(ser)
self.assertIn("Name: 0", rep_str)
# tidy repr
ser = Series(np.random.randn(1001), name=0)
rep_str = repr(ser)
self.assertIn("Name: 0", rep_str)
ser = Series(["a\n\r\tb"], name="a\n\r\td", index=["a\n\r\tf"])
self.assertFalse("\t" in repr(ser))
self.assertFalse("\r" in repr(ser))
self.assertFalse("a\n" in repr(ser))
# with empty series (#4651)
s = | Series([], dtype=np.int64, name='foo') | pandas.Series |
import pandas as pd
from src.models.globs import pp, beta_std
from src.data.make_dataset import model_id
D = pd.read_json('data/processed/joint_results.json')
exp = ['main', 'follow-up']
# Remove participants who failed attention check
att_check = D[D.stimquality=='bad']
att_check_exp = {k: att_check.query(f"expName=='{k}'") for k in exp}
att = {k: att_check_exp[k] for k in exp}
att['all'] = att_check
att_conf = {d:100*att[d].y.mean() for d in att}
max_tol = 2
bad_vps = att_check_exp['main'].groupby('participant').y.sum() > max_tol
bad_vps = list(bad_vps[bad_vps].index)
D = D[D.participant.apply(lambda x: x not in bad_vps)]
# Remove attention checks from data set.
att_stim = att_check.loc[:, 'mov_file_generated'].unique()
for x in att_stim:
D = D[D.mov_file_generated != x]
d1 = D[D.expName == 'main']
d2 = D[D.expName == 'follow-up']
# Catchtrials
catchcf = d1[d1.mp_type=='catchtrial'].y.mean()
# Remove catchtrials from further data analysis.
D = D[D.mp_type != 'catchtrial']
# Save result for logistic regression
scores_exp = []
for i, k in enumerate(exp):
scr = pd.read_json(f'data/interim/scores_exp{i+1}.json')
scr['model_id'] = scr.apply(model_id, axis=1)
scr.loc[scr.model_id == 'mapgpdm', 'model_id'] = 'map_gpdm'
scr.loc[scr.model_id == 'mapcgpdm', 'model_id'] = 'map_cgpdm'
scr = scr.set_index('model_id', verify_integrity=True)
gb = D[D.expName==k].groupby('model_id')
cf = gb.y.mean()
std = gb.y.apply(beta_std)
scr = pd.concat(
[scr, cf.rename('confusion_rate'), std.rename('std')],
axis=1, join='inner')
scores_exp += [scr]
scores = pd.concat(scores_exp, sort=False).reset_index()
scores.to_json('data/processed/joint_scores.json')
d_exp = {s: D[D.expName == s] for s in exp}
reg_data = []
for i, k in enumerate(exp):
new_cols = set(scores_exp[i]) - set(d_exp[k])
outer = d_exp[k].apply(lambda df: scores_exp[i].loc[df.model_id],
axis=1)
reg_data += [pd.concat([d_exp[k], outer[new_cols]], axis=1)]
reg_data = | pd.concat(reg_data, axis=0) | pandas.concat |
import operator
import warnings
import numpy as np
import pandas as pd
import pandas.testing as tm
import pytest
from pytest import param
import ibis
import ibis.expr.datatypes as dt
from ibis.backends.pandas.execution.temporal import day_name
@pytest.mark.parametrize('attr', ['year', 'month', 'day'])
@pytest.mark.parametrize(
"expr_fn",
[
param(lambda c: c.date(), id="date"),
param(
lambda c: c.cast("date"),
id="cast",
marks=pytest.mark.notimpl(["impala"]),
),
],
)
@pytest.mark.notimpl(["datafusion"])
def test_date_extract(backend, alltypes, df, attr, expr_fn):
expr = getattr(expr_fn(alltypes.timestamp_col), attr)()
expected = getattr(df.timestamp_col.dt, attr).astype('int32')
result = expr.name(attr).execute()
backend.assert_series_equal(result, expected.rename(attr))
@pytest.mark.parametrize(
'attr',
[
'year',
'month',
'day',
param('day_of_year', marks=pytest.mark.notimpl(["impala"])),
'quarter',
'hour',
'minute',
'second',
],
)
@pytest.mark.notimpl(["datafusion"])
def test_timestamp_extract(backend, alltypes, df, attr):
method = getattr(alltypes.timestamp_col, attr)
expr = method().name(attr)
result = expr.execute()
expected = backend.default_series_rename(
getattr(df.timestamp_col.dt, attr.replace('_', '')).astype('int32')
).rename(attr)
backend.assert_series_equal(result, expected)
@pytest.mark.notimpl(["datafusion", "clickhouse"])
@pytest.mark.notyet(["sqlite", "pyspark"])
def test_timestamp_extract_milliseconds(backend, alltypes, df):
expr = alltypes.timestamp_col.millisecond()
result = expr.execute()
expected = backend.default_series_rename(
(df.timestamp_col.dt.microsecond // 1_000).astype('int32')
).rename("millisecond")
backend.assert_series_equal(result, expected)
@pytest.mark.notimpl(["datafusion"])
def test_timestamp_extract_epoch_seconds(backend, alltypes, df):
expr = alltypes.timestamp_col.epoch_seconds()
result = expr.execute()
expected = backend.default_series_rename(
(df.timestamp_col.view("int64") // 1_000_000_000).astype("int32")
)
backend.assert_series_equal(result, expected)
@pytest.mark.notimpl(["datafusion"])
def test_timestamp_extract_week_of_year(backend, alltypes, df):
expr = alltypes.timestamp_col.week_of_year()
result = expr.execute()
expected = backend.default_series_rename(
df.timestamp_col.dt.isocalendar().week.astype("int32")
)
backend.assert_series_equal(result, expected)
@pytest.mark.parametrize(
'unit',
[
'Y',
'M',
'D',
param(
'W',
marks=pytest.mark.notimpl(
[
"clickhouse",
"duckdb",
"impala",
"mysql",
"postgres",
"pyspark",
"sqlite",
]
),
),
param('h', marks=pytest.mark.notimpl(["sqlite"])),
param('m', marks=pytest.mark.notimpl(["sqlite"])),
param('s', marks=pytest.mark.notimpl(["impala", "sqlite"])),
param(
'ms',
marks=pytest.mark.notimpl(
[
"clickhouse",
"impala",
"mysql",
"pyspark",
"sqlite",
]
),
),
param(
'us',
marks=pytest.mark.notimpl(
[
"clickhouse",
"impala",
"mysql",
"pyspark",
"sqlite",
]
),
),
param(
'ns',
marks=pytest.mark.notimpl(
[
"clickhouse",
"duckdb",
"impala",
"mysql",
"postgres",
"pyspark",
"sqlite",
]
),
),
],
)
@pytest.mark.notimpl(["datafusion"])
def test_timestamp_truncate(backend, alltypes, df, unit):
expr = alltypes.timestamp_col.truncate(unit)
dtype = f'datetime64[{unit}]'
expected = pd.Series(df.timestamp_col.values.astype(dtype))
result = expr.execute()
expected = backend.default_series_rename(expected)
backend.assert_series_equal(result, expected)
@pytest.mark.parametrize(
'unit',
[
'Y',
'M',
'D',
param(
'W',
marks=pytest.mark.notimpl(
[
"clickhouse",
"duckdb",
"impala",
"mysql",
"postgres",
"pyspark",
"sqlite",
]
),
),
],
)
@pytest.mark.notimpl(["datafusion"])
def test_date_truncate(backend, alltypes, df, unit):
expr = alltypes.timestamp_col.date().truncate(unit)
dtype = f"datetime64[{unit}]"
expected = pd.Series(df.timestamp_col.values.astype(dtype))
result = expr.execute()
expected = backend.default_series_rename(expected)
backend.assert_series_equal(result, expected)
@pytest.mark.parametrize(
('unit', 'displacement_type'),
[
param(
'Y',
pd.offsets.DateOffset,
# TODO - DateOffset - #2553
marks=pytest.mark.notimpl(['dask']),
),
param('Q', pd.offsets.DateOffset, marks=pytest.mark.xfail),
param(
'M',
pd.offsets.DateOffset,
# TODO - DateOffset - #2553
marks=pytest.mark.notimpl(['dask']),
),
param(
'W',
pd.offsets.DateOffset,
# TODO - DateOffset - #2553
marks=pytest.mark.notimpl(['dask']),
),
param('D', pd.offsets.DateOffset),
param('h', pd.Timedelta),
param('m', pd.Timedelta),
param('s', pd.Timedelta),
param(
'ms',
pd.Timedelta,
marks=pytest.mark.notimpl(["clickhouse", "mysql"]),
),
param(
'us',
pd.Timedelta,
marks=pytest.mark.notimpl(["clickhouse"]),
),
],
)
@pytest.mark.notimpl(["datafusion", "pyspark", "sqlite"])
def test_integer_to_interval_timestamp(
backend, con, alltypes, df, unit, displacement_type
):
interval = alltypes.int_col.to_interval(unit=unit)
expr = alltypes.timestamp_col + interval
def convert_to_offset(offset, displacement_type=displacement_type):
resolution = f'{interval.type().resolution}s'
return displacement_type(**{resolution: offset})
with warnings.catch_warnings():
        # both the implementation and the test code raise pandas
        # PerformanceWarning, because we use DateOffset addition
warnings.simplefilter("ignore", category=pd.errors.PerformanceWarning)
result = con.execute(expr)
offset = df.int_col.apply(convert_to_offset)
expected = df.timestamp_col + offset
expected = backend.default_series_rename(expected)
backend.assert_series_equal(result, expected)
@pytest.mark.parametrize(
'unit', ['Y', param('Q', marks=pytest.mark.xfail), 'M', 'W', 'D']
)
# TODO - DateOffset - #2553
@pytest.mark.notimpl(
[
"dask",
"datafusion",
"impala",
"mysql",
"pyspark",
"sqlite",
]
)
def test_integer_to_interval_date(backend, con, alltypes, df, unit):
interval = alltypes.int_col.to_interval(unit=unit)
array = alltypes.date_string_col.split('/')
month, day, year = array[0], array[1], array[2]
date_col = expr = (
ibis.literal('-').join(['20' + year, month, day]).cast('date')
)
expr = date_col + interval
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=pd.errors.PerformanceWarning)
result = con.execute(expr)
def convert_to_offset(x):
resolution = f'{interval.type().resolution}s'
return pd.offsets.DateOffset(**{resolution: x})
offset = df.int_col.apply(convert_to_offset)
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=pd.errors.PerformanceWarning)
expected = pd.to_datetime(df.date_string_col) + offset
expected = backend.default_series_rename(expected)
backend.assert_series_equal(result, expected)
date_value = pd.Timestamp('2017-12-31')
timestamp_value = pd.Timestamp('2018-01-01 18:18:18')
@pytest.mark.parametrize(
('expr_fn', 'expected_fn'),
[
param(
lambda t, be: t.timestamp_col + ibis.interval(days=4),
lambda t, be: t.timestamp_col + | pd.Timedelta(days=4) | pandas.Timedelta |
# -*- coding: utf-8 -*-
# pylint: disable=E1101,E1103,W0232
import os
import sys
from datetime import datetime
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
import pandas.compat as compat
import pandas.core.common as com
import pandas.util.testing as tm
from pandas import (Categorical, Index, Series, DataFrame, PeriodIndex,
Timestamp, CategoricalIndex)
from pandas.compat import range, lrange, u, PY3
from pandas.core.config import option_context
# GH 12066
# flake8: noqa
class TestCategorical(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.factor = Categorical.from_array(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'],
ordered=True)
def test_getitem(self):
self.assertEqual(self.factor[0], 'a')
self.assertEqual(self.factor[-1], 'c')
subf = self.factor[[0, 1, 2]]
tm.assert_almost_equal(subf._codes, [0, 1, 1])
subf = self.factor[np.asarray(self.factor) == 'c']
tm.assert_almost_equal(subf._codes, [2, 2, 2])
def test_getitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(np.int8))
result = c.codes[np.array([100000]).astype(np.int64)]
expected = c[np.array([100000]).astype(np.int64)].codes
self.assert_numpy_array_equal(result, expected)
def test_setitem(self):
# int/positional
c = self.factor.copy()
c[0] = 'b'
self.assertEqual(c[0], 'b')
c[-1] = 'a'
self.assertEqual(c[-1], 'a')
# boolean
c = self.factor.copy()
indexer = np.zeros(len(c), dtype='bool')
indexer[0] = True
indexer[-1] = True
c[indexer] = 'c'
expected = Categorical.from_array(['c', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assert_categorical_equal(c, expected)
def test_setitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(
np.int8)).add_categories([-1000])
indexer = np.array([100000]).astype(np.int64)
c[indexer] = -1000
# we are asserting the code result here
# which maps to the -1000 category
result = c.codes[np.array([100000]).astype(np.int64)]
self.assertEqual(result, np.array([5], dtype='int8'))
def test_constructor_unsortable(self):
# it works!
arr = np.array([1, 2, 3, datetime.now()], dtype='O')
factor = Categorical.from_array(arr, ordered=False)
self.assertFalse(factor.ordered)
if compat.PY3:
self.assertRaises(
TypeError, lambda: Categorical.from_array(arr, ordered=True))
else:
# this however will raise as cannot be sorted (on PY3 or older
# numpies)
if LooseVersion(np.__version__) < "1.10":
self.assertRaises(
TypeError,
lambda: Categorical.from_array(arr, ordered=True))
else:
Categorical.from_array(arr, ordered=True)
def test_is_equal_dtype(self):
# test dtype comparisons between cats
c1 = Categorical(list('aabca'), categories=list('abc'), ordered=False)
c2 = Categorical(list('aabca'), categories=list('cab'), ordered=False)
c3 = Categorical(list('aabca'), categories=list('cab'), ordered=True)
self.assertTrue(c1.is_dtype_equal(c1))
self.assertTrue(c2.is_dtype_equal(c2))
self.assertTrue(c3.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(c2))
self.assertFalse(c1.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(Index(list('aabca'))))
self.assertFalse(c1.is_dtype_equal(c1.astype(object)))
self.assertTrue(c1.is_dtype_equal(CategoricalIndex(c1)))
self.assertFalse(c1.is_dtype_equal(
CategoricalIndex(c1, categories=list('cab'))))
self.assertFalse(c1.is_dtype_equal(CategoricalIndex(c1, ordered=True)))
def test_constructor(self):
exp_arr = np.array(["a", "b", "c", "a", "b", "c"])
c1 = Categorical(exp_arr)
self.assert_numpy_array_equal(c1.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["c", "b", "a"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
# categories must be unique
def f():
Categorical([1, 2], [1, 2, 2])
self.assertRaises(ValueError, f)
def f():
Categorical(["a", "b"], ["a", "b", "b"])
self.assertRaises(ValueError, f)
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([1, 2], [1, 2, np.nan, np.nan])
self.assertRaises(ValueError, f)
# The default should be unordered
c1 = Categorical(["a", "b", "c", "a"])
self.assertFalse(c1.ordered)
# Categorical as input
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c1.__array__(), c2.__array__())
self.assert_numpy_array_equal(c2.categories, np.array(["a", "b", "c"]))
# Series of dtype category
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
# Series
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(Series(["a", "b", "c", "a"]))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(
Series(["a", "b", "c", "a"]), categories=["a", "b", "c", "d"])
self.assertTrue(c1.equals(c2))
# This should result in integer categories, not float!
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# https://github.com/pydata/pandas/issues/3678
cat = pd.Categorical([np.nan, 1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# this should result in floats
cat = pd.Categorical([np.nan, 1, 2., 3])
self.assertTrue(com.is_float_dtype(cat.categories))
cat = pd.Categorical([np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
        # Deprecating NaNs in categories (GH #10748)
# preserve int as far as possible by converting to object if NaN is in
# categories
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1, 2, 3],
categories=[np.nan, 1, 2, 3])
self.assertTrue(com.is_object_dtype(cat.categories))
# This doesn't work -> this would probably need some kind of "remember
# the original type" feature to try to cast the array interface result
# to...
# vals = np.asarray(cat[cat.notnull()])
# self.assertTrue(com.is_integer_dtype(vals))
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, "a", "b", "c"],
categories=[np.nan, "a", "b", "c"])
self.assertTrue(com.is_object_dtype(cat.categories))
# but don't do it for floats
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1., 2., 3.],
categories=[np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
# corner cases
cat = pd.Categorical([1])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical(["a"])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == "a")
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
# Scalars should be converted to lists
cat = pd.Categorical(1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical([1], categories=1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
        # Catch old style constructor usage: two arrays, codes + categories
# We can only catch two cases:
# - when the first is an integer dtype and the second is not
# - when the resulting codes are all -1/NaN
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2],
categories=["a", "b", "c"]) # noqa
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2], # noqa
categories=[3, 4, 5])
        # the next ones are from the old docs, but unfortunately these don't
# trigger :-(
with tm.assert_produces_warning(None):
c_old2 = Categorical([0, 1, 2, 0, 1, 2], [1, 2, 3]) # noqa
cat = Categorical([1, 2], categories=[1, 2, 3])
# this is a legitimate constructor
with tm.assert_produces_warning(None):
c = Categorical(np.array([], dtype='int64'), # noqa
categories=[3, 2, 1], ordered=True)
def test_constructor_with_index(self):
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(ci)))
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(
ci.astype(object), categories=ci.categories)))
def test_constructor_with_generator(self):
# This was raising an Error in isnull(single_val).any() because isnull
# returned a scalar for a generator
xrange = range
exp = Categorical([0, 1, 2])
cat = Categorical((x for x in [0, 1, 2]))
self.assertTrue(cat.equals(exp))
cat = Categorical(xrange(3))
self.assertTrue(cat.equals(exp))
# This uses xrange internally
from pandas.core.index import MultiIndex
MultiIndex.from_product([range(5), ['a', 'b', 'c']])
# check that categories accept generators and sequences
cat = pd.Categorical([0, 1, 2], categories=(x for x in [0, 1, 2]))
self.assertTrue(cat.equals(exp))
cat = pd.Categorical([0, 1, 2], categories=xrange(3))
self.assertTrue(cat.equals(exp))
def test_from_codes(self):
# too few categories
def f():
Categorical.from_codes([1, 2], [1, 2])
self.assertRaises(ValueError, f)
# no int codes
def f():
Categorical.from_codes(["a"], [1, 2])
self.assertRaises(ValueError, f)
# no unique categories
def f():
Categorical.from_codes([0, 1, 2], ["a", "a", "b"])
self.assertRaises(ValueError, f)
# too negative
def f():
Categorical.from_codes([-2, 1, 2], ["a", "b", "c"])
self.assertRaises(ValueError, f)
exp = Categorical(["a", "b", "c"], ordered=False)
res = Categorical.from_codes([0, 1, 2], ["a", "b", "c"])
self.assertTrue(exp.equals(res))
# Not available in earlier numpy versions
if hasattr(np.random, "choice"):
codes = np.random.choice([0, 1], 5, p=[0.9, 0.1])
pd.Categorical.from_codes(codes, categories=["train", "test"])
def test_comparisons(self):
result = self.factor[self.factor == 'a']
expected = self.factor[np.asarray(self.factor) == 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor != 'a']
expected = self.factor[np.asarray(self.factor) != 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor < 'c']
expected = self.factor[np.asarray(self.factor) < 'c']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor > 'a']
expected = self.factor[np.asarray(self.factor) > 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor >= 'b']
expected = self.factor[np.asarray(self.factor) >= 'b']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor <= 'b']
expected = self.factor[np.asarray(self.factor) <= 'b']
self.assertTrue(result.equals(expected))
n = len(self.factor)
other = self.factor[np.random.permutation(n)]
result = self.factor == other
expected = np.asarray(self.factor) == np.asarray(other)
self.assert_numpy_array_equal(result, expected)
result = self.factor == 'd'
expected = np.repeat(False, len(self.factor))
self.assert_numpy_array_equal(result, expected)
# comparisons with categoricals
cat_rev = pd.Categorical(["a", "b", "c"], categories=["c", "b", "a"],
ordered=True)
cat_rev_base = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a"], ordered=True)
cat = pd.Categorical(["a", "b", "c"], ordered=True)
cat_base = pd.Categorical(["b", "b", "b"], categories=cat.categories,
ordered=True)
# comparisons need to take categories ordering into account
res_rev = cat_rev > cat_rev_base
exp_rev = np.array([True, False, False])
self.assert_numpy_array_equal(res_rev, exp_rev)
res_rev = cat_rev < cat_rev_base
exp_rev = np.array([False, False, True])
self.assert_numpy_array_equal(res_rev, exp_rev)
res = cat > cat_base
exp = np.array([False, False, True])
self.assert_numpy_array_equal(res, exp)
# Only categories with same categories can be compared
def f():
cat > cat_rev
self.assertRaises(TypeError, f)
cat_rev_base2 = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a", "d"])
def f():
cat_rev > cat_rev_base2
self.assertRaises(TypeError, f)
# Only categories with same ordering information can be compared
cat_unorderd = cat.set_ordered(False)
self.assertFalse((cat > cat).any())
def f():
cat > cat_unorderd
self.assertRaises(TypeError, f)
# comparison (in both directions) with Series will raise
s = Series(["b", "b", "b"])
self.assertRaises(TypeError, lambda: cat > s)
self.assertRaises(TypeError, lambda: cat_rev > s)
self.assertRaises(TypeError, lambda: s < cat)
self.assertRaises(TypeError, lambda: s < cat_rev)
        # comparison with numpy.array will raise in both directions, but only on
        # newer numpy versions
a = np.array(["b", "b", "b"])
self.assertRaises(TypeError, lambda: cat > a)
self.assertRaises(TypeError, lambda: cat_rev > a)
# The following work via '__array_priority__ = 1000'
# works only on numpy >= 1.7.1
if LooseVersion(np.__version__) > "1.7.1":
self.assertRaises(TypeError, lambda: a < cat)
self.assertRaises(TypeError, lambda: a < cat_rev)
        # Make sure that unequal comparisons take the categories order into
        # account
cat_rev = pd.Categorical(
list("abc"), categories=list("cba"), ordered=True)
exp = np.array([True, False, False])
res = cat_rev > "b"
self.assert_numpy_array_equal(res, exp)
def test_na_flags_int_categories(self):
# #1457
categories = lrange(10)
labels = np.random.randint(0, 10, 20)
labels[::5] = -1
cat = Categorical(labels, categories, fastpath=True)
repr(cat)
self.assert_numpy_array_equal(com.isnull(cat), labels == -1)
def test_categories_none(self):
factor = Categorical(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assertTrue(factor.equals(self.factor))
def test_describe(self):
# string type
desc = self.factor.describe()
expected = DataFrame({'counts': [3, 2, 3],
'freqs': [3 / 8., 2 / 8., 3 / 8.]},
index=pd.CategoricalIndex(['a', 'b', 'c'],
name='categories'))
tm.assert_frame_equal(desc, expected)
# check unused categories
cat = self.factor.copy()
cat.set_categories(["a", "b", "c", "d"], inplace=True)
desc = cat.describe()
expected = DataFrame({'counts': [3, 2, 3, 0],
'freqs': [3 / 8., 2 / 8., 3 / 8., 0]},
index=pd.CategoricalIndex(['a', 'b', 'c', 'd'],
name='categories'))
tm.assert_frame_equal(desc, expected)
# check an integer one
desc = Categorical([1, 2, 3, 1, 2, 3, 3, 2, 1, 1, 1]).describe()
expected = DataFrame({'counts': [5, 3, 3],
'freqs': [5 / 11., 3 / 11., 3 / 11.]},
index=pd.CategoricalIndex([1, 2, 3],
name='categories'))
tm.assert_frame_equal(desc, expected)
# https://github.com/pydata/pandas/issues/3678
# describe should work with NaN
cat = pd.Categorical([np.nan, 1, 2, 2])
desc = cat.describe()
expected = DataFrame({'counts': [1, 2, 1],
'freqs': [1 / 4., 2 / 4., 1 / 4.]},
index=pd.CategoricalIndex([1, 2, np.nan],
categories=[1, 2],
name='categories'))
tm.assert_frame_equal(desc, expected)
# NA as a category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c", np.nan],
categories=["b", "a", "c", np.nan])
result = cat.describe()
expected = DataFrame([[0, 0], [1, 0.25], [2, 0.5], [1, 0.25]],
columns=['counts', 'freqs'],
index=pd.CategoricalIndex(['b', 'a', 'c', np.nan],
name='categories'))
tm.assert_frame_equal(result, expected)
# NA as an unused category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c"],
categories=["b", "a", "c", np.nan])
result = cat.describe()
exp_idx = pd.CategoricalIndex(
['b', 'a', 'c', np.nan], name='categories')
expected = DataFrame([[0, 0], [1, 1 / 3.], [2, 2 / 3.], [0, 0]],
columns=['counts', 'freqs'], index=exp_idx)
tm.assert_frame_equal(result, expected)
def test_print(self):
expected = ["[a, b, b, a, a, c, c, c]",
"Categories (3, object): [a < b < c]"]
expected = "\n".join(expected)
actual = repr(self.factor)
self.assertEqual(actual, expected)
def test_big_print(self):
factor = Categorical([0, 1, 2, 0, 1, 2] * 100, ['a', 'b', 'c'],
name='cat', fastpath=True)
expected = ["[a, b, c, a, b, ..., b, c, a, b, c]", "Length: 600",
"Categories (3, object): [a, b, c]"]
expected = "\n".join(expected)
actual = repr(factor)
self.assertEqual(actual, expected)
def test_empty_print(self):
factor = Categorical([], ["a", "b", "c"])
expected = ("[], Categories (3, object): [a, b, c]")
# hack because array_repr changed in numpy > 1.6.x
actual = repr(factor)
self.assertEqual(actual, expected)
self.assertEqual(expected, actual)
factor = Categorical([], ["a", "b", "c"], ordered=True)
expected = ("[], Categories (3, object): [a < b < c]")
actual = repr(factor)
self.assertEqual(expected, actual)
factor = Categorical([], [])
expected = ("[], Categories (0, object): []")
self.assertEqual(expected, repr(factor))
def test_print_none_width(self):
# GH10087
a = pd.Series(pd.Categorical([1, 2, 3, 4]))
exp = u("0 1\n1 2\n2 3\n3 4\n" +
"dtype: category\nCategories (4, int64): [1, 2, 3, 4]")
with option_context("display.width", None):
self.assertEqual(exp, repr(a))
def test_unicode_print(self):
if PY3:
_rep = repr
else:
_rep = unicode # noqa
c = pd.Categorical(['aaaaa', 'bb', 'cccc'] * 20)
expected = u"""\
[aaaaa, bb, cccc, aaaaa, bb, ..., bb, cccc, aaaaa, bb, cccc]
Length: 60
Categories (3, object): [aaaaa, bb, cccc]"""
self.assertEqual(_rep(c), expected)
c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう']
* 20)
expected = u"""\
[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
self.assertEqual(_rep(c), expected)
# unicode option should not affect to Categorical, as it doesn't care
# the repr width
with option_context('display.unicode.east_asian_width', True):
c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう']
* 20)
expected = u"""[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
self.assertEqual(_rep(c), expected)
def test_periodindex(self):
idx1 = PeriodIndex(['2014-01', '2014-01', '2014-02', '2014-02',
'2014-03', '2014-03'], freq='M')
cat1 = Categorical.from_array(idx1)
str(cat1)
exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype='int64')
exp_idx = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat1._codes, exp_arr)
self.assertTrue(cat1.categories.equals(exp_idx))
idx2 = PeriodIndex(['2014-03', '2014-03', '2014-02', '2014-01',
'2014-03', '2014-01'], freq='M')
cat2 = Categorical.from_array(idx2, ordered=True)
str(cat2)
exp_arr = np.array([2, 2, 1, 0, 2, 0], dtype='int64')
exp_idx2 = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat2._codes, exp_arr)
self.assertTrue(cat2.categories.equals(exp_idx2))
idx3 = PeriodIndex(['2013-12', '2013-11', '2013-10', '2013-09',
'2013-08', '2013-07', '2013-05'], freq='M')
cat3 = Categorical.from_array(idx3, ordered=True)
exp_arr = np.array([6, 5, 4, 3, 2, 1, 0], dtype='int64')
exp_idx = PeriodIndex(['2013-05', '2013-07', '2013-08', '2013-09',
'2013-10', '2013-11', '2013-12'], freq='M')
self.assert_numpy_array_equal(cat3._codes, exp_arr)
self.assertTrue(cat3.categories.equals(exp_idx))
def test_categories_assigments(self):
s = pd.Categorical(["a", "b", "c", "a"])
exp = np.array([1, 2, 3, 1])
s.categories = [1, 2, 3]
self.assert_numpy_array_equal(s.__array__(), exp)
self.assert_numpy_array_equal(s.categories, np.array([1, 2, 3]))
# lengthen
def f():
s.categories = [1, 2, 3, 4]
self.assertRaises(ValueError, f)
# shorten
def f():
s.categories = [1, 2]
self.assertRaises(ValueError, f)
def test_construction_with_ordered(self):
# GH 9347, 9190
cat = Categorical([0, 1, 2])
self.assertFalse(cat.ordered)
cat = Categorical([0, 1, 2], ordered=False)
self.assertFalse(cat.ordered)
cat = Categorical([0, 1, 2], ordered=True)
self.assertTrue(cat.ordered)
def test_ordered_api(self):
# GH 9347
cat1 = pd.Categorical(["a", "c", "b"], ordered=False)
self.assertTrue(cat1.categories.equals(Index(['a', 'b', 'c'])))
self.assertFalse(cat1.ordered)
cat2 = pd.Categorical(["a", "c", "b"], categories=['b', 'c', 'a'],
ordered=False)
self.assertTrue(cat2.categories.equals(Index(['b', 'c', 'a'])))
self.assertFalse(cat2.ordered)
cat3 = pd.Categorical(["a", "c", "b"], ordered=True)
self.assertTrue(cat3.categories.equals(Index(['a', 'b', 'c'])))
self.assertTrue(cat3.ordered)
cat4 = pd.Categorical(["a", "c", "b"], categories=['b', 'c', 'a'],
ordered=True)
self.assertTrue(cat4.categories.equals(Index(['b', 'c', 'a'])))
self.assertTrue(cat4.ordered)
def test_set_ordered(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
cat2 = cat.as_unordered()
self.assertFalse(cat2.ordered)
cat2 = cat.as_ordered()
self.assertTrue(cat2.ordered)
cat2.as_unordered(inplace=True)
self.assertFalse(cat2.ordered)
cat2.as_ordered(inplace=True)
self.assertTrue(cat2.ordered)
self.assertTrue(cat2.set_ordered(True).ordered)
self.assertFalse(cat2.set_ordered(False).ordered)
cat2.set_ordered(True, inplace=True)
self.assertTrue(cat2.ordered)
cat2.set_ordered(False, inplace=True)
self.assertFalse(cat2.ordered)
        # deprecated in v0.16.0
with tm.assert_produces_warning(FutureWarning):
cat.ordered = False
self.assertFalse(cat.ordered)
with tm.assert_produces_warning(FutureWarning):
cat.ordered = True
self.assertTrue(cat.ordered)
def test_set_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
exp_categories = np.array(["c", "b", "a"])
exp_values = np.array(["a", "b", "c", "a"])
res = cat.set_categories(["c", "b", "a"], inplace=True)
self.assert_numpy_array_equal(cat.categories, exp_categories)
self.assert_numpy_array_equal(cat.__array__(), exp_values)
self.assertIsNone(res)
res = cat.set_categories(["a", "b", "c"])
# cat must be the same as before
self.assert_numpy_array_equal(cat.categories, exp_categories)
self.assert_numpy_array_equal(cat.__array__(), exp_values)
# only res is changed
exp_categories_back = np.array(["a", "b", "c"])
self.assert_numpy_array_equal(res.categories, exp_categories_back)
self.assert_numpy_array_equal(res.__array__(), exp_values)
# not all "old" included in "new" -> all not included ones are now
# np.nan
cat = Categorical(["a", "b", "c", "a"], ordered=True)
res = cat.set_categories(["a"])
self.assert_numpy_array_equal(res.codes, np.array([0, -1, -1, 0]))
# still not all "old" in "new"
res = cat.set_categories(["a", "b", "d"])
self.assert_numpy_array_equal(res.codes, np.array([0, 1, -1, 0]))
self.assert_numpy_array_equal(res.categories,
np.array(["a", "b", "d"]))
# all "old" included in "new"
cat = cat.set_categories(["a", "b", "c", "d"])
exp_categories = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(cat.categories, exp_categories)
# internals...
c = Categorical([1, 2, 3, 4, 1], categories=[1, 2, 3, 4], ordered=True)
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 3, 0]))
self.assert_numpy_array_equal(c.categories, np.array([1, 2, 3, 4]))
self.assert_numpy_array_equal(c.get_values(),
np.array([1, 2, 3, 4, 1]))
c = c.set_categories(
[4, 3, 2, 1
]) # all "pointers" to '4' must be changed from 3 to 0,...
self.assert_numpy_array_equal(c._codes, np.array([3, 2, 1, 0, 3])
) # positions are changed
self.assert_numpy_array_equal(c.categories, np.array([4, 3, 2, 1])
) # categories are now in new order
self.assert_numpy_array_equal(c.get_values(), np.array([1, 2, 3, 4, 1])
) # output is the same
        self.assertEqual(c.min(), 4)
        self.assertEqual(c.max(), 1)
# set_categories should set the ordering if specified
c2 = c.set_categories([4, 3, 2, 1], ordered=False)
self.assertFalse(c2.ordered)
self.assert_numpy_array_equal(c.get_values(), c2.get_values())
# set_categories should pass thru the ordering
c2 = c.set_ordered(False).set_categories([4, 3, 2, 1])
self.assertFalse(c2.ordered)
self.assert_numpy_array_equal(c.get_values(), c2.get_values())
def test_rename_categories(self):
cat = pd.Categorical(["a", "b", "c", "a"])
# inplace=False: the old one must not be changed
res = cat.rename_categories([1, 2, 3])
self.assert_numpy_array_equal(res.__array__(), np.array([1, 2, 3, 1]))
self.assert_numpy_array_equal(res.categories, np.array([1, 2, 3]))
self.assert_numpy_array_equal(cat.__array__(),
np.array(["a", "b", "c", "a"]))
self.assert_numpy_array_equal(cat.categories,
np.array(["a", "b", "c"]))
res = cat.rename_categories([1, 2, 3], inplace=True)
# and now inplace
self.assertIsNone(res)
self.assert_numpy_array_equal(cat.__array__(), np.array([1, 2, 3, 1]))
self.assert_numpy_array_equal(cat.categories, np.array([1, 2, 3]))
# lengthen
def f():
cat.rename_categories([1, 2, 3, 4])
self.assertRaises(ValueError, f)
# shorten
def f():
cat.rename_categories([1, 2])
self.assertRaises(ValueError, f)
def test_reorder_categories(self):
cat = | Categorical(["a", "b", "c", "a"], ordered=True) | pandas.Categorical |
from copy import deepcopy
import requests
import os
import bs4
from openpyxl import load_workbook
import pandas as pd
from ..helpers.db_funcs import get_ep_id_by_number, get_season_id_by_number_type
from ..helpers.extract_helpers import search_for_new_seasons
import glob
import re
import numpy as np
DOCS_URL_TEMPLATE = 'https://docs.google.com/spreadsheets/d/{id}/export?format=xlsx&id={id}'
SURVIVOR_SOURCE = 'https://www.truedorktimes.com/survivor/boxscores/data.htm'
def create_data_dict(subset=None):
ret_dict = {}
sp = bs4.BeautifulSoup(requests.get(SURVIVOR_SOURCE).content)
cast_elements = sp.find_all('ul', attrs={'class': 'cast'})
for e in cast_elements:
attrs = e.find('a').attrs
try:
if 'spreadsheet' in attrs['href']:
v = attrs['href'][:-1].split('/')[-1]
k = str(e.text.lower())
for p in '- ':
k = k.replace(p, '_')
for p in ':.-,':
k = k.replace(p, '')
k = k.replace('\n', '')[1:]
if subset:
if k.split('_')[0] not in subset:
continue
ret_dict[k] = v
else:
pass
except KeyError:
pass
return ret_dict
def save_survivor_excel(sheets_id, readable_name, dest_folder='../data/raw'):
url = DOCS_URL_TEMPLATE.format(**dict(id=sheets_id))
f_name = '{readable_name}.xlsx'.format(readable_name=readable_name)
req = requests.get(url)
with open(os.path.join(dest_folder, f_name), 'wb') as f:
f.write(req.content)
req.close()
def pull_and_save_excels(data_dict=None, subset=None, dest_folder='../data/raw'):
if not data_dict:
data_dict = create_data_dict(subset=subset)
for k, v in data_dict.items():
save_survivor_excel(v, k, dest_folder=dest_folder)
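def _example_pull_excels(dest_folder='../data/raw'):
    # Hypothetical usage sketch (not part of the original module): scrape the
    # spreadsheet ids from the boxscore index page and download every workbook
    # into dest_folder. The function name and folder here are illustrative only.
    data_dict = create_data_dict()
    pull_and_save_excels(data_dict, dest_folder=dest_folder)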
# Above is for the actual excels...
def empty_cond(ws, cell, *args, **kwargs):
return not cell.value
def vertical_cond_tc(ws, cell, *args, **kwargs):
return empty_cond(ws, cell) or (cell.value == 'wanda')
def any_cond(ws, cell):
return False
def rc_horizontal_cond(ws, cell, col_names, *args, **kwargs):
above = ws.cell(row=cell.row - 1, column=cell.column).value
if isinstance(above, str):
ic_bool = ('IC' in above) or (
'Immunity challenge' in above) or ('RC' in above)
ic_bool = ic_bool and (len(col_names) != 0)
else:
ic_bool = False
return (not cell.value) or (ic_bool)
def ep_horizontal_cond(ws, cell, col_names, nblanks=5, *args, **kwargs):
add_numbers = [x for x in range(1, nblanks + 1)]
right_two = [not ws.cell(
row=cell.row, column=cell.column + i).value for i in add_numbers]
return all(right_two) and not cell.value
def normal_extract_values(ws, row, column_start, width, *args, **kwargs):
return pd.Series([ws.cell(row=row, column=column_start + i + 1).value for i in range(width)])
def vote_extract_values(ws, row, column_start, width, col_names, *args, **kwargs):
values = normal_extract_values(ws, row, column_start, width)
values = pd.Series([c for i, c in enumerate(
col_names) if pd.notnull(values[i])])
return values if len(values) > 0 else pd.Series([None])
def identity_pp(df, col_names, *args, **kwargs):
df.columns = col_names
df = df.loc[:, ~df.columns.duplicated()]
return df
def ep_pp(df, col_names, *args, **kwargs):
df = identity_pp(df, col_names, *args, **kwargs)
df = df[df.columns[~df.columns.isna()]]
return df
def vote_pp(df, col_names, *args, **kwargs):
if df.shape[1] > 1:
df = pd.DataFrame(pd.concat([df[col] for col in df]))
df.columns = ['voted_for']
df['vote_counted'] = ~df['voted_for'].isna()
df = df.loc[:, ~df.columns.duplicated()]
return df
def extract_subtable(ws, start_row, start_column, index_column=None, horizontal_condition=None,
vertical_condition=None, extract_values=None, postprocess=None):
if not horizontal_condition:
horizontal_condition = empty_cond
if not vertical_condition:
vertical_condition = empty_cond
if not extract_values:
extract_values = normal_extract_values
if not postprocess:
postprocess = identity_pp
row = start_row
col = start_column
col_names = []
rows = []
idx = []
while True:
cell = ws.cell(row=row, column=col)
v = cell.value
if horizontal_condition(ws, cell, col_names):
break
else:
col_names.append(v)
col += 1
n_voted_against = len(col_names)
col -= (n_voted_against + 1)
row += 1
while True:
idx_cell = ws.cell(
row=row, column=index_column if index_column else col)
if vertical_condition(ws, idx_cell):
break
else:
values = extract_values(ws, row, col, n_voted_against, col_names)
rows.append(values)
idx.append(idx_cell.value)
row += 1
df = pd.DataFrame(rows)
df.index = idx
df.index.name = 'contestant'
df = postprocess(df, col_names)
df.reset_index(inplace=True)
return df
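# extract_subtable scans right from (start_row, start_column) collecting column names
# until horizontal_condition fires, then scans down collecting one record per
# worksheet row until vertical_condition fires; extract_values turns a worksheet row
# into a pandas Series and postprocess reshapes the resulting frame (e.g. vote_pp
# melts the vote matrix into a single 'voted_for' column).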
def extract_rc_challenge(ws, c_row, c_column):
return extract_subtable(ws, c_row + 1, c_column, horizontal_condition=rc_horizontal_cond, index_column=1)
def extract_ic_challenge(ws, c_row, c_column):
return extract_subtable(ws, c_row + 1, c_column, horizontal_condition=rc_horizontal_cond, index_column=1)
def extract_tc(ws, c_row, c_column):
return extract_subtable(ws, c_row + 1, c_column, vertical_condition=vertical_cond_tc, extract_values=vote_extract_values, postprocess=vote_pp, index_column=1)
def extract_ep(ws, c_row, c_column):
return extract_subtable(ws, c_row, c_column, index_column=1, horizontal_condition=ep_horizontal_cond, postprocess=ep_pp)
def append_tc_data(df, ws, cell):
v = cell.value
try:
total_players = int(re.search('F(\d+)', v).group(1))
except AttributeError:
if 'No' in v:
return pd.DataFrame()
elif 'Morgan' in v:
total_players = 10
elif 'Drake' in v:
total_players = 9
elif 'Mokuta' in v:
total_players = 18
elif 'Vakama' in v:
total_players = 17
else:
raise
episode = ws.title[1:]
new_df = extract_tc(ws, cell.row, cell.column)
new_df['total_players_remaining'] = total_players
new_df['episode'] = int(re.match('(\d+).*', episode).group(1))
df = pd.concat([df, new_df], ignore_index=True)
return df
def append_challenge_data(df, ws, cell, challenge_type):
search = re.search('F(\d+)', cell.value)
if not search:
# We don't have information about the "final" amount, so we don't fill this in
final = None
else:
final = int(search.group(1))
if challenge_type == 'RC':
extract_f = extract_rc_challenge
elif challenge_type == 'IC':
extract_f = extract_ic_challenge
else:
raise ValueError
episode = ws.title[1:]
new_df = extract_f(ws, cell.row, cell.column)
new_df['total_players_remaining'] = final
new_df['episode'] = int(re.match('(\d+).*', episode).group(1))
try:
df = pd.concat([df, new_df], ignore_index=True)
except:
import pdb
pdb.set_trace()
return df
def append_episode_data(df, ws, cell):
episode = ws.title[1:]
new_df = extract_ep(ws, cell.row, cell.column)
new_df['episode'] = int(re.match('(\d+).*', episode).group(1))
df = | pd.concat([df, new_df], ignore_index=True) | pandas.concat |
#! /usr/bin/env python3
import pandas as pd
import numpy as np
import glob
from datetime import datetime
from dateutil.parser import parse
from elasticsearch import Elasticsearch
def _load_vmstat(monitoring_data):
monitoring_data["timestamp"] = pd.to_datetime(monitoring_data["timestamp"]+ 3600, unit='s')
monitoring_data = monitoring_data.rename(columns={"r":"processes","b":"waiting","swdp":"virtual mem","free":"free","buff":"buffers","si":"mem_on_disk","so":"mem_to_disk","bi":"blockIn","bo":"blockOut","in":"interrupts","cs":"switches","us":"cpu_user","sy":"cpu_system","id":"cpu_idle","wa":"blocked"})
return monitoring_data
def load_vmstat(load_from_cache=False,store_cache_file=False,cache_file=None):
monitoring_data = None
if load_from_cache and cache_file is not None:
monitoring_data = pd.read_csv(cache_file)
else:
for file in glob.glob("vmstats/*"):
df = pd.read_csv(file, skiprows = 0,error_bad_lines=False)
if monitoring_data is None:
monitoring_data = df
else:
monitoring_data = pd.concat([monitoring_data, df], sort=True)
#clean up data
monitoring_data = _load_vmstat(monitoring_data)
if store_cache_file:
monitoring_data.to_csv(cache_file)
return monitoring_data
def load_elastic(load_from_cache=False,store_cache_file=False,cache_file=None,es=None,experiment_dates=[]):
monitoring_data = None
if load_from_cache and cache_file is not None:
monitoring_data = pd.read_csv(cache_file)
else:
monitoring_data = collect_monitoring_data(es,"*",experiment_dates)
if store_cache_file:
if monitoring_data is not None:
monitoring_data.to_csv(cache_file)
return monitoring_data
def load_rmstats():
monitoring_data = None
for file in glob.glob("rmstats/*.csv"):
df = pd.read_csv(file, skiprows = 0,error_bad_lines=False)
if monitoring_data is None:
monitoring_data = df
else:
monitoring_data = pd.concat([monitoring_data, df], sort=True)
return monitoring_data
def load_experiment(load_from_cache=False,store_cache_file=False,data_cache_file=None):
data = None
if load_from_cache and data_cache_file is not None:
data = pd.read_csv(data_cache_file)
else:
data = __load()
if store_cache_file:
data.to_csv(data_cache_file)
return data
def __load():
all = None
for file in glob.glob("data/*"):
names = file[5:-4].split("-")
experiment=names[0]
method=names[1]
datetime.strptime
timestamp=file[5+len(experiment)+1+len(method)+1:-4]
date=timestamp[:timestamp.index("T")]
date=datetime.strptime(date, '%Y-%m-%d')
timestamp=parse(timestamp)
df = pd.read_csv(file, skiprows = 0,error_bad_lines=False)
df['experiment']=experiment
df['method']=method
df['startTime']=pd.Timestamp(year=timestamp.year,month=timestamp.month, day=timestamp.day, hour=timestamp.hour, minute=timestamp.minute)
df['runDate']=pd.Timestamp(year=date.year,month=date.month, day=date.day)
if (all is None):
all = df
else:
all = pd.concat([all, df], sort=True)
return all
def __collect(es,index,query):
data = []
page = es.search(
index = index,
scroll = '2m',
size = 1000,
body = query)
if '_scroll_id' in page:
sid = page['_scroll_id']
scroll_size = page['hits']['total']
data = data + page['hits']['hits']
# Start scrolling
while (scroll_size > 0):
page = es.scroll(scroll_id = sid, scroll = '2m')
# Update the scroll ID
sid = page['_scroll_id']
# Get the number of results that we returned in the last scroll
scroll_size = len(page['hits']['hits'])
data = data + page['hits']['hits']
return data
else:
return data
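# __collect pages through an Elasticsearch index using the scroll API: the initial
# search opens a 2-minute scroll context, and each subsequent es.scroll() call
# returns the next batch of hits until an empty page signals that every document
# matching the query has been collected.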
def collect_monitoring_data(es,vdcname,dates=[]):
all = None
for runDate in dates:
esAll = []
index = "{}-{}".format(vdcname,runDate.date().strftime("%Y-%m-%d"))
print("loading data from index",index)
esAll = __collect(es,index,{"query": {"match_all": {}}})
if len(esAll) <= 0:
continue
esAll = list(map(lambda x:x["_source"],esAll))
responses = filter(lambda x:'response.code' in x,esAll)
requests = filter(lambda x:'response.code' not in x,esAll)
responses = pd.DataFrame(responses)
responses = responses[['request.id','response.code','response.length']]
requests = | pd.DataFrame(requests) | pandas.DataFrame |
import pandas as pd
import numpy as np
import math
import copy
import time
import util
import apply_model
import calculus
def produce_cont_relevances(inputDf, model, col):
reles=np.zeros((len(model["conts"][col])+1,len(inputDf)))
reles[0][(inputDf[col]<=model["conts"][col][0][0])] = 1 #d(featpred)/d(pt)
for i in range(len(model["conts"][col])-1):
x = inputDf[col]
x1 = model["conts"][col][i][0]
x2 = model["conts"][col][i+1][0]
subset = (x>=x1) & (x<=x2)
#print(reles[subset][:,1])
reles[i][subset] = (x2 - x[subset])/(x2 - x1) #d(featpred)/d(pt)
reles[i+1][subset] = (x[subset] - x1)/(x2 - x1) #d(featpred)/d(pt)
reles[-2][(inputDf[col]>=model["conts"][col][-1][0])] = 1 #d(featpred)/d(pt)
reles[-1] = 1 - np.sum(reles, axis=0)
return np.transpose(reles) #roundabout way of doing this but the rest of the function doesn't flow as naturally if x and y don't switch places
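# Worked example (illustrative numbers, not from the original code): for a continuous
# feature with knot points at x=1 and x=3, a row with x=2 lies halfway between the two
# knots, so its relevance vector is [0.5, 0.5, 0.0] -- 0.5 for each knot point and 0
# for the trailing catch-all slot computed as 1 minus the sum of the others.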
def produce_cont_relevances_dict(inputDf, model):
opDict = {}
for col in model["conts"]:
opDict[col]=produce_cont_relevances(inputDf, model, col)
return opDict
def produce_cat_relevances(inputDf, model, col):
reles=np.zeros((len(model["cats"][col]["uniques"])+1,len(inputDf)))
skeys = apply_model.get_sorted_keys(model, col)
for i in range(len(skeys)):
reles[i][inputDf[col].isin([skeys[i]])] = 1 #d(featpred)/d(pt)
reles[-1][~inputDf[col].isin(skeys)] = 1 #d(featpred)/d(pt)
return np.transpose(reles) #roundabout way of doing this but the rest of the function doesn't flow as naturally if x and y don't switch places
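# For categorical features the relevance matrix is a one-hot encoding over the model's
# known levels (in sorted-key order) plus a trailing "OTHER" column that captures any
# value not present among those levels.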
def produce_cat_relevances_dict(inputDf, model):
opDict = {}
for col in model["cats"]:
opDict[col]=produce_cat_relevances(inputDf, model, col)
return opDict
#Interactions
def interact_relevances(relesA, relesB):
relesA = np.transpose(relesA) #Yes, I know.
relesB = np.transpose(relesB) #Shut up.
relesI = np.zeros((len(relesA)*len(relesB),len(relesA[0])))
for i in range(len(relesA)):
for j in range(len(relesB)):
relesI[i*len(relesB)+j] = relesA[i]*relesB[j]
return np.transpose(relesI) # . . .
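# Illustrative sketch (hypothetical inputs, not part of the original pipeline): with a
# 2-column and a 3-column relevance matrix the interaction has 2 * 3 = 6 columns, and
# column i * 3 + j holds relesA[:, i] * relesB[:, j] -- the interaction is "on" only
# where both parent relevances are.
def _example_interact_relevances():
    relesA = np.array([[1.0, 0.0], [0.0, 1.0]])               # e.g. a 2-level categorical
    relesB = np.array([[0.5, 0.5, 0.0], [0.0, 0.25, 0.75]])   # e.g. a continuous feature
    return interact_relevances(relesA, relesB)                # shape (2, 6)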
def produce_catcat_relevances(inputDf, model, cols):
col1, col2 = cols.split(' X ')
return interact_relevances(produce_cat_relevances(inputDf, model, col1), produce_cat_relevances(inputDf, model, col2))
def produce_catcont_relevances(inputDf, model, cols):
col1, col2 = cols.split(' X ')
return interact_relevances(produce_cat_relevances(inputDf, model, col1), produce_cont_relevances(inputDf, model, col2))
def produce_contcont_relevances(inputDf, model, cols):
col1, col2 = cols.split(' X ')
return interact_relevances(produce_cont_relevances(inputDf, model, col1), produce_cont_relevances(inputDf, model, col2))
def produce_interxn_relevances_dict(inputDf, model):
opDict = {}
for cols in model['catcats']:
opDict[cols] = produce_catcat_relevances(inputDf, model, cols)
for cols in model['catconts']:
opDict[cols] = produce_catcont_relevances(inputDf, model, cols)
for cols in model['contconts']:
opDict[cols] = produce_contcont_relevances(inputDf, model, cols)
return opDict
def sum_and_listify_matrix(a):
return np.array(sum(a)).tolist()
def produce_total_irelevances_dict(releDict):
op = {}
for cols in releDict:
op[cols] = sum_and_listify_matrix(releDict[cols])
return op
def produce_total_relevances_dict(contReleDict, catReleDict):
op = {"conts":{},"cats":{}}
for col in contReleDict:
op["conts"][col] = sum_and_listify_matrix(contReleDict[col])
for col in catReleDict:
op["cats"][col] = sum_and_listify_matrix(catReleDict[col])
print(op)
return op
def produce_wReleDict(releDict, w):
wReleDict = {}
for col in releDict:
wReleDict[col]=w*releDict[col]
return wReleDict
def train_model(inputDf, target, nrounds, lrs, startingModels, weights=None, ignoreCols = [], grad=calculus.Poisson_grad):
models = copy.deepcopy(startingModels)
if weights==None:
weights = np.ones(len(inputDf))
w = np.array(np.transpose(np.matrix(weights)))
sw = sum(weights)
contReleDictList=[]
catReleDictList=[]
totReleDictList=[]
contWReleDictList=[]
catWReleDictList=[]
totWReleDictList=[]
print("initial relevances setup")
for model in models:
cord = produce_cont_relevances_dict(inputDf,model)
card = produce_cat_relevances_dict(inputDf,model)
contReleDictList.append(cord)
catReleDictList.append(card)
totReleDictList.append(produce_total_relevances_dict(cord, card))
cowrd = produce_wReleDict(cord, w)
cawrd = produce_wReleDict(card, w)
contWReleDictList.append(cowrd)
catWReleDictList.append(cawrd)
totWReleDictList.append(produce_total_relevances_dict(cowrd, cawrd))
#Interactions . . .
interxReleDictList = []
interxTotReleDictList = []
interxWReleDictList = []
interxTotWReleDictList = []
for model in models:
ird = produce_interxn_relevances_dict(inputDf, model)
interxReleDictList.append(ird)
interxTotReleDictList.append(produce_total_irelevances_dict(ird))
wird = produce_wReleDict(ird, w)
interxWReleDictList.append(wird)
interxTotWReleDictList.append(produce_total_irelevances_dict(wird))
for i in range(nrounds):
print("epoch: "+str(i+1)+"/"+str(nrounds))
for model in models:
print(model)#apply_model.explain(model)
print("initial pred and effect-gathering")
preds=[]
overallPred=pd.Series([0]*len(inputDf))
contEffectsList=[]
catEffectsList=[]
interxEffectsList=[]
for m in range(len(models)):
contEffects = apply_model.get_effects_of_cont_cols_from_relevance_dict(contReleDictList[m],models[m])
contEffectsList.append(contEffects)
catEffects = apply_model.get_effects_of_cat_cols_from_relevance_dict(catReleDictList[m],models[m])
catEffectsList.append(catEffects)
interxEffects = apply_model.get_effects_of_interxns_from_relevance_dict(interxReleDictList[m],models[m])
interxEffectsList.append(interxEffects)
pred = apply_model.pred_from_effects(models[m]["BASE_VALUE"], len(inputDf), contEffects, catEffects, interxEffects)
preds.append(pred)
overallPred += pred
gradient = grad(overallPred, np.array(inputDf[target])) #d(Loss)/d(pred)
for m in range(len(models)):
model=models[m]
pred=preds[m]
print("adjust conts")
for col in [c for c in model['conts'] if c not in ignoreCols]:#model["conts"]:
effectOfCol = contEffectsList[m][col]
peoc = pred/effectOfCol #d(pred)/d(featpred)
finalGradients = np.matmul(np.array(peoc*gradient),contWReleDictList[m][col]) #d(Loss)/d(pt) = d(Loss)/d(pred) * d(pred)/d(featpred) * d(featpred)/d(pt)
for k in range(len(models[m]['conts'][col])):
totRele = totWReleDictList[m]["conts"][col][k]
if totRele>0:
models[m]["conts"][col][k][1] -= finalGradients[k]*lrs[m]/totRele #and not /sw
print("adjust cats")
for col in [c for c in model['cats'] if c not in ignoreCols]:#model["cats"]:
effectOfCol = catEffectsList[m][col]
peoc = pred/effectOfCol #d(pred)/d(featpred)
finalGradients = np.matmul(np.array(peoc*gradient),catWReleDictList[m][col]) #d(Loss)/d(pt) = d(Loss)/d(pred) * d(pred)/d(featpred) * d(featpred)/d(pt)
skeys = apply_model.get_sorted_keys(model, col)
#all the uniques . . .
for k in range(len(skeys)):
totRele = totWReleDictList[m]["cats"][col][k]
if totRele>0:
models[m]["cats"][col]["uniques"][skeys[k]] -= finalGradients[k]*lrs[m]/totRele #and not /sw
# . . . and "OTHER"
totRele = totWReleDictList[m]["cats"][col][-1]
if totRele>0:
models[m]["cats"][col]["OTHER"] -= finalGradients[-1]*lrs[m]/totRele #and not /sw
print('adjust catcats')
for cols in [c for c in model['catcats'] if c not in ignoreCols]:#model['catcats']:
col1, col2 = cols.split(' X ')
effectOfCols = interxEffectsList[m][cols]
peoc = pred/effectOfCols #d(pred)/d(featpred)
finalGradients = np.matmul(np.array(peoc*gradient),interxWReleDictList[m][cols]) #d(Loss)/d(pt) = d(Loss)/d(pred) * d(pred)/d(featpred) * d(featpred)/d(pt)
skeys1 = apply_model.get_sorted_keys(model, col1)
skeys2 = apply_model.get_sorted_keys(model, col2)
for i in range(len(skeys1)):
for j in range(len(skeys2)):
totRele = interxTotWReleDictList[m][cols][i*(len(skeys2)+1)+j]
if totRele>0:
models[m]["catcats"][cols]["uniques"][skeys1[i]]['uniques'][skeys2[i]] -= finalGradients[i*(len(skeys2)+1)+j]*lrs[m]/totRele #and not /sw
totRele = interxTotWReleDictList[m][cols][i*(len(skeys2)+1)+len(skeys2)]
if totRele>0:
models[m]['catcats'][cols]["uniques"][skeys1[i]]['OTHER'] -= finalGradients[i*(len(skeys2)+1)+len(skeys2)]*lrs[m]/totRele #and not /sw
for j in range(len(skeys2)):
totRele = interxTotWReleDictList[m][cols][len(skeys1)*(len(skeys2)+1)+j]
if totRele>0:
models[m]["catcats"][cols]['OTHER']['uniques'][skeys2[i]] -= finalGradients[len(skeys1)*(len(skeys2)+1)+j]*lrs[m]/totRele #and not /sw
totRele = interxTotWReleDictList[m][cols][-1]
if totRele>0:
models[m]['catcats'][cols]['OTHER']['OTHER'] -= finalGradients[-1]*lrs[m]/totRele #and not /sw
print('adjust catconts')
for cols in [c for c in model['catconts'] if c not in ignoreCols]:#model['catconts']
col1, col2 = cols.split(' X ')
effectOfCols = interxEffectsList[m][cols]
peoc = pred/effectOfCols #d(pred)/d(featpred)
finalGradients = np.matmul(np.array(peoc*gradient),interxWReleDictList[m][cols]) #d(Loss)/d(pt) = d(Loss)/d(pred) * d(pred)/d(featpred) * d(featpred)/d(pt)
skeys = apply_model.get_sorted_keys(model, col1)
for i in range(len(skeys)):
for j in range(len(models[m]['conts'][col2])):
totRele = interxTotWReleDictList[m][cols][i*(len(models[m]['conts'][col2])+1)+j]
if totRele>0:
models[m]['catconts'][cols]['uniques'][skeys[i]][j][1] -= finalGradients[i*(len(models[m]['conts'][col2])+1)+j]*lrs[m]/totRele #and not /sw
for j in range(len(models[m]['conts'][col2])):
totRele = interxTotWReleDictList[m][cols][len(skeys)*(len(models[m]['conts'][col2])+1)+j]
if totRele>0:
models[m]['catconts'][cols]['OTHER'][j][1] -= finalGradients[len(skeys)*(len(models[m]['conts'][col2])+1)+j]*lrs[m]/totRele #and not /sw
for cols in [c for c in model['contconts'] if c not in ignoreCols]:#model['contconts']
col1, col2 = cols.split(' X ')
effectOfCols = interxEffectsList[m][cols]
peoc = pred/effectOfCols #d(pred)/d(featpred)
finalGradients = np.matmul(np.array(peoc*gradient),interxWReleDictList[m][cols]) #d(Loss)/d(pt) = d(Loss)/d(pred) * d(pred)/d(featpred) * d(featpred)/d(pt)
for i in range(len(models[m]['conts'][col1])):
for j in range(len(models[m]['conts'][col2])):
totRele = interxTotWReleDictList[m][cols][i*(len(models[m]['conts'][col2])+1)+j]
if totRele>0:
models[m]['contconts'][cols][i][1][j][1] -= finalGradients[i*(len(models[m]['conts'][col2])+1)+j]*lrs[m]/totRele #and not /sw
return models
if __name__ == '__main__':
df = pd.DataFrame({"cat1":['a','a','a','b','b','b','q','q','q','q'],'cat2':['c','d','q','c','d','q','c','d','q','q'],"y":[1,2,3,4,5,6,7,8,9,10]})
models = [{"BASE_VALUE":1.0,"conts":{}, "cats":{"cat1":{"uniques":{"a":1,"b":1,},"OTHER":1}, "cat2":{"uniques":{"c":1,"d":1},"OTHER":1}}, 'catcats':{'cat1 X cat2':{'uniques':{'a':{'uniques':{'c':1,'d':1},'OTHER':1},'b':{'uniques':{'c':1,'d':1},'OTHER':1}},'OTHER':{'uniques':{'c':1,'d':1},'OTHER':1}}}, 'catconts':{}, 'contconts':{}}]
print(produce_catcat_relevances(df, models[0], "cat1 X cat2"))
df = pd.DataFrame({"cat1":['a','a','a','b','b','b','q','q','q','q'],'cat2':['c','d','q','c','d','q','c','d','q','q'],"y":[1,2,3,4,5,6,7,8,9,10]})
models = [{"BASE_VALUE":1.0,"conts":{}, "cats":{"cat1":{"uniques":{"a":1,"b":1,},"OTHER":1}, "cat2":{"uniques":{"c":1,"d":1},"OTHER":1}}, 'catcats':{'cat1 X cat2':{'uniques':{'a':{'uniques':{'c':1,'d':1},'OTHER':1},'b':{'uniques':{'c':1,'d':2},'OTHER':1}},'OTHER':{'uniques':{'c':1,'d':1},'OTHER':1}}}, 'catconts':{}, 'contconts':{}}]
reles = produce_catcat_relevances(df, models[0], "cat1 X cat2")
print(apply_model.get_effect_of_this_catcat_from_relevances(reles, models[0], "cat1 X cat2"))
df = pd.DataFrame({"cat1":['a','a','a','b','b','b','q','q','q','q'],'cat2':['c','d','q','c','d','q','c','d','q','q'],"y":[1,1,1,1,2,1,1,1,1,1]})
models = [{"BASE_VALUE":1.0,"conts":{}, "cats":{"cat1":{"uniques":{"a":1,"b":1,},"OTHER":1}, "cat2":{"uniques":{"c":1,"d":1},"OTHER":1}}, 'catcats':{'cat1 X cat2':{'uniques':{'a':{'uniques':{'c':1,'d':1},'OTHER':1},'b':{'uniques':{'c':1,'d':1},'OTHER':1}},'OTHER':{'uniques':{'c':1,'d':1},'OTHER':1}}}, 'catconts':{}, 'contconts':{}}]
newModels = train_model(df, "y",50, [0.4], models, ignoreCols = ['cat1','cat2'])
print(newModels)
df = pd.DataFrame({"cat1":['a','a','a','b','b','b','q','q','q','q','q'],'cont1':[1,2,3,1,2,3,1,2,3,1.5,np.nan],"y":[1,2,3,4,5,6,7,8,9,10,11]})
models = [{"BASE_VALUE":1.0,"conts":{'cont1':[[1,1],[2,1],[3,1]]}, "cats":{"cat1":{"uniques":{"a":1,"b":1},"OTHER":1}}, 'catcats':{}, 'catconts':{"cat1 X cont1":{"uniques":{"a":[[1,1],[2,1],[3,1]],"b":[[1,1],[2,1],[3,1]]},"OTHER":[[1,1],[2,1],[3,1]]}}, 'contconts':{}}]
print(produce_catcont_relevances(df, models[0], "cat1 X cont1"))
df = pd.DataFrame({"cat1":['a','a','a','b','b','b','q','q','q','q','q'],'cont1':[1,2,3,1,2,3,1,2,3,1.5,np.nan],"y":[1,2,3,4,5,6,7,8,9,10,11]})
models = [{"BASE_VALUE":1.0,"conts":{'cont1':[[1,1],[2,1],[3,1]]}, "cats":{"cat1":{"uniques":{"a":1,"b":1},"OTHER":1}}, 'catcats':{}, 'catconts':{"cat1 X cont1":{"uniques":{"a":[[1,1],[2,1],[3,1]],"b":[[1,1],[2,4],[3,1]]},"OTHER":[[1,1],[2,1],[3,1]]}}, 'contconts':{}}]
reles = produce_catcont_relevances(df, models[0], "cat1 X cont1")
print(apply_model.get_effect_of_this_catcont_from_relevances(reles, models[0], "cat1 X cont1"))
df = pd.DataFrame({"cat1":['a','a','a','b','b','b','q','q','q','q','q'],'cont1':[1,2,3,1,2,3,1,2,3,1.5,np.nan],"y":[1,1,1,1,1,2,1,1,1,1,1]})
models = [{"BASE_VALUE":1.0,"conts":{'cont1':[[1,1],[2,1],[3,1]]}, "cats":{"cat1":{"uniques":{"a":1,"b":1},"OTHER":1}}, 'catcats':{}, 'catconts':{"cat1 X cont1":{"uniques":{"a":[[1,1],[2,1],[3,1]],"b":[[1,1],[2,1],[3,1]]},"OTHER":[[1,1],[2,1],[3,1]]}}, 'contconts':{}}]
newModels = train_model(df, "y",50, [0.4], models, ignoreCols = ['cat1','cont1'])
print(newModels)
df = pd.DataFrame({"cont1":[1,1,1,2,2,2,3,3,3,1.5,np.nan],'cont2':[1,2,3,1,2,3,1,2,3,1.5,np.nan],"y":[1,2,3,4,5,6,7,8,9,10,11]})
models = [{"BASE_VALUE":1.0,"conts":{'cont1':[[1,1],[2,1],[3,1]],'cont2':[[1,1],[2,1],[3,1]]}, "cats":{}, 'catcats':{}, 'catconts':{}, 'contconts':{'cont1 X cont2': [[1,[[1,1],[2,1],[3,1]]],[2,[[1,1],[2,1],[3,1]]],[3,[[1,1],[2,1],[3,1]]]]} }]
print(produce_contcont_relevances(df, models[0], "cont1 X cont2"))
df = pd.DataFrame({"cont1":[1,1,1,2,2,2,3,3,3,1.5,np.nan],'cont2':[1,2,3,1,2,3,1,2,3,1.5,np.nan],"y":[1,2,3,4,5,6,7,8,9,10,11]})
models = [{"BASE_VALUE":1.0,"conts":{'cont1':[[1,1],[2,1],[3,1]],'cont2':[[1,1],[2,1],[3,1]]}, "cats":{}, 'catcats':{}, 'catconts':{}, 'contconts':{'cont1 X cont2': [[1,[[1,1],[2,1],[3,1]]],[2,[[1,1],[2,1],[3,1]]],[3,[[1,1],[2,1],[3,5]]]]} }]
reles = produce_contcont_relevances(df, models[0], "cont1 X cont2")
print(apply_model.get_effect_of_this_contcont_from_relevances(reles, models[0], "cont1 X cont2"))
df = pd.DataFrame({"cont1":[1,1,1,2,2,2,3,3,3,1.5,np.nan],'cont2':[1,2,3,1,2,3,1,2,3,1.5,np.nan],"y":[1,1,1, 1,1,1, 1,1,2, 1,1]})
models = [{"BASE_VALUE":1.0,"conts":{'cont1':[[1,1],[2,1],[3,1]],'cont2':[[1,1],[2,1],[3,1]]}, "cats":{}, 'catcats':{}, 'catconts':{}, 'contconts':{'cont1 X cont2': [[1,[[1,1],[2,1],[3,1]]],[2,[[1,1],[2,1],[3,1]]],[3,[[1,1],[2,1],[3,1]]]]} }]
newModels = train_model(df, "y",50, [0.4], models, ignoreCols = ['cont1','cont2'])
print(newModels)
if False:
df = | pd.DataFrame({"x":[1,2,3,4,5,6,7,8,9],"y":[1,2,3,4,5,6,7,8,9]}) | pandas.DataFrame |
import pandas as pd
import datetime
import os
# install pandas, openpyxl, xlrd
def xlsx_merger():
path = os.getcwd()
files = os.listdir(path)
files_xls = [f for f in files if f[-4:] == 'xlsx']
print(files_xls)
# read them in
excels = [pd.ExcelFile(name) for name in files_xls]
# turn them into dataframes
frames = [x.parse(x.sheet_names[0], header=None, index_col=None)
for x in excels]
# delete the first row for all frames except the first
# i.e. remove the header row -- assumes it's the first
frames[1:] = [df[1:] for df in frames[1:]]
# concatenate them..
combined = pd.concat(frames)
# write it out
combined.to_excel("Envios - " + str(start.strftime("%Y-%m-%d - %H")
) + "h.xlsx", header=False, index=False)
def xls_merger():
path = os.getcwd()
files = os.listdir(path)
files_xls = [f for f in files if f[-3:] == 'xls']
print(files_xls)
# read them in
excels = [ | pd.ExcelFile(name) | pandas.ExcelFile |
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from pandas import DataFrame, Series
import scipy as sp
from scipy import linalg, optimize
from scipy.optimize import minimize, minimize_scalar, rosen, rosen_der, brentq, fminbound, curve_fit
import math
from scipy.stats import norm
import pylab as P
import matplotlib.mlab as mlab
import beadpy
def segmentplotter(table,maxrate, ymin, ymax, legloc = 1, scale = 10):
table = table[abs(table['rate']) < maxrate]
x = table['x1']
y = table['displacement']
size = abs(table['rate'])/scale
fig, ax = plt.subplots(figsize=(10, 7.5))
ax.scatter(x, y, s = size, alpha=0.5, color='magenta', edgecolors='black')
bins = np.linspace(0, maxrate, 4)
if scale < 5:
firstbinsize = 1
elif scale >=5:
firstbinsize = 25
l1 = ax.scatter([],[], s=(firstbinsize)/scale, c = 'magenta')
l2 = ax.scatter([],[], s=bins[1] / scale, c = 'magenta')
l3 = ax.scatter([],[], s=bins[2] / scale, c = 'magenta')
l4 = ax.scatter([],[], s=bins[3] / scale,c = 'magenta')
labels = [firstbinsize, int(bins[1]), int(bins[2]), int(bins[3])]
ax.legend([l1, l2, l3, l4],
labels,
frameon = True,
fontsize = 16,
handlelength = 2,
loc = legloc,
borderpad = 0.5,
handletextpad = 1,
title ='Rate (nt/s)',
scatterpoints = 1)
ax.set_xlabel('Segment start time (s)', fontsize=16)
ax.set_ylabel('Segment length (nt)', fontsize=16)
ax.set_xlim((0, max(x)))
ax.set_ylim((ymin, ymax))
fig.tight_layout(pad=2);
ax.grid(True)
fig.savefig('segments.png', dpi = 300)
#plt.clf()
return ax;
def filterer(resultstable, segmentstable, minrate, maxrate, mindisplacement, starttime, endtime):
filtsegments = segmentstable[(abs(segmentstable['rate']) > minrate)
& (segmentstable['rate'] < maxrate)
& (segmentstable['displacement'] >= mindisplacement)
& (segmentstable['x1'] > starttime)
& (segmentstable['x1'] < endtime)]
filtresults = resultstable[resultstable.trajectory.isin(filtsegments.trajectory)]
filtresults.to_csv('filtresults.csv', index = False, float_format='%.4f')
filtsegments.to_csv('filtsegments.csv', index = False)
return filtresults, filtsegments;
def trajectory_plotter(resultstable, exampletraj, sigma = 500, method = ('global', 'table'), sigma_start = 10, sigma_end = 100, eventregion = (200,500), segmenttable = 0):
exampletraj = int(exampletraj)
fig, ax = plt.subplots(figsize = (10, 7.5))
ax.plot(resultstable['time'][resultstable['trajectory'] == exampletraj],
resultstable['nucleotides'][resultstable['trajectory'] == exampletraj]/1000,
lw = 3)
ax.set_xlabel("Time (s)", fontsize=16)
ax.set_ylabel("Nucleotides synthesised (kb)", fontsize=16)
ax.set_xlim((-50,resultstable['time'][resultstable['trajectory'] == exampletraj].max()+50))
ax.set_ylim((-0.5 + resultstable['nucleotides'][resultstable['trajectory'] == exampletraj].min()/1000,0.5 + resultstable['nucleotides'][resultstable['trajectory'] == exampletraj].max()/1000))
if not method == 'nofit':
if method == ('global', 'table'):
exampletrajseg = beadpy.ratefinder(resultstable[resultstable['trajectory']==exampletraj], segtable = segmenttable, sigmaval = sigma)
elif method == ('global', 'region'):
exampletrajseg = beadpy.segment_finder(resultstable[(resultstable['time']>eventregion[0]) & (resultstable['time'] < eventregion[1])], sigma = sigma, traj = exampletraj)
elif method == ('global', 'whole'):
exampletrajseg = beadpy.segment_finder(resultstable, sigma = sigma, traj = exampletraj)
elif method == ('auto', 'table'):
test, sigma = beadpy.segment_finder(resultstable, method = 'auto', traj = int(exampletraj), returnsigma = 'yes', sigma_start = sigma_start, sigma_end = sigma_end)
exampletrajseg = beadpy.ratefinder(resultstable[resultstable['trajectory']==exampletraj], segtable = segmenttable, sigmaval = sigma)
elif method == ('auto', 'region'):
test, sigma = beadpy.segment_finder(resultstable, method = 'auto', traj = int(exampletraj), returnsigma = 'yes', sigma_start = sigma_start, sigma_end = sigma_end)
exampletrajseg = beadpy.segment_finder(resultstable[(resultstable['time']>eventregion[0]) & (resultstable['time'] < eventregion[1])], sigma = sigma, traj = exampletraj)
elif method == ('auto', 'whole'):
exampletrajseg, sigma = beadpy.segment_finder(resultstable, method = 'auto', traj = int(exampletraj), returnsigma = 'yes', sigma_start = sigma_start, sigma_end = sigma_end)
fig.suptitle('Trajectory '+str(exampletraj)+', sigma '+str(int(sigma)), fontsize = 16)
for row_index, row in exampletrajseg[exampletrajseg.trajectory==exampletraj].iterrows():
ax.plot([row['x1'], row['x2']], [row['y1']/1000, row['y2']/1000],'k-', lw=2, color='Magenta', linestyle='-')
else:
fig.suptitle('Trajectory '+str(exampletraj), fontsize = 16)
ax.tick_params(axis='both', labelsize=14)
fig.tight_layout(pad=4)
if not method == 'nofit':
fig.savefig('traj_'+str(exampletraj)+'_sigma_'+str(sigma)+'.png', dpi = 300)
return exampletrajseg
else:
fig.savefig('traj_'+str(exampletraj)+'.png', dpi = 300)
def weighted_avg_and_std(values, weights):
average = np.average(values, weights=weights)
variance = np.average((values-average)**2, weights=weights) # Fast and numerically precise
return (average, math.sqrt(variance))
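# Example with made-up numbers (the weights behave like observation counts):
_wavg_example = weighted_avg_and_std(np.array([10.0, 20.0]), np.array([1.0, 3.0]))
# -> (17.5, 4.3301...), i.e. weighted mean 17.5 and sd sqrt(18.75)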
def ratefinder(restable, segtable, sigmaval = 300):
#Filter the results to only be between the first data point of the filtered segments, and the last for each trajectory.
restable = restable[restable.trajectory.isin(segtable.trajectory)]
segtable = segtable[segtable['trajectory'].isin(restable['trajectory'])]
groupedsegs = segtable.groupby(['trajectory'], as_index=False)
starttimes = groupedsegs['x1'].min()
endtimes = groupedsegs['x2'].max()
startendtimes = pd.merge(left=starttimes, right = endtimes, how='left', left_on='trajectory', right_on='trajectory')
mergedfiltresults = pd.merge(left=restable,right=startendtimes, how='left', left_on='trajectory', right_on='trajectory')
finefiltresults = mergedfiltresults[(mergedfiltresults['time'] >= mergedfiltresults['x1'])
& (mergedfiltresults['time'] <= mergedfiltresults['x2'])]
#Do change point analysis on these events:
segmentsfine = beadpy.segment_finder(finefiltresults, sigma = sigmaval)
return segmentsfine;
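# The clipping step above, shown on toy frames (hypothetical values): after merging the
# per-trajectory x1/x2 bounds onto the results, only rows with x1 <= time <= x2 survive,
# so the change-point analysis only sees the evented part of each trajectory.
def _clip_to_segment_bounds_sketch():
    res = pd.DataFrame({'trajectory': [1, 1, 1], 'time': [0.0, 5.0, 9.0]})
    seg = pd.DataFrame({'trajectory': [1], 'x1': [2.0], 'x2': [8.0]})
    merged = pd.merge(left=res, right=seg, how='left', on='trajectory')
    return merged[(merged['time'] >= merged['x1']) & (merged['time'] <= merged['x2'])]  # only the t = 5.0 row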
def sigmaval_finder(restable, sigma_start = 0, sigma_end = 150):
restable = restable.reset_index(drop=True)
sigmaregion = restable[(restable.time > sigma_start) & (restable.time < sigma_end)]
sigmavals = sigmaregion.groupby('trajectory')['nucleotides'].apply(lambda x:x.rolling(center=False,window=20).std().mean())
sigmavals = sigmavals[np.logical_not(np.isnan(sigmavals))]
trajectories = sigmavals.index.tolist()
sigmavals = sigmavals.tolist()
return sigmavals, trajectories;
def ratefinder_autosigma(restable, segtable, sigma_start, sigma_end):
segtable = segtable[segtable['trajectory'].isin(restable['trajectory'])]
restable = restable[restable.trajectory.isin(segtable.trajectory)]
sigmavals, trajectories = sigmaval_finder(restable, sigma_start, sigma_end)
restable = restable[restable.trajectory.isin(trajectories)]
segtable = segtable[segtable.trajectory.isin(trajectories)]
groupedsegs = segtable.groupby(['trajectory'], as_index=False)
starttimes = groupedsegs['x1'].min()
endtimes = groupedsegs['x2'].max()
startendtimes = pd.merge(left=starttimes, right = endtimes, how='left', left_on='trajectory', right_on='trajectory')
mergedfiltresults = | pd.merge(left=restable,right=startendtimes, how='left', left_on='trajectory', right_on='trajectory') | pandas.merge |
import numpy as np
import pandas as pd
from primitive_interfaces.unsupervised_learning import UnsupervisedLearnerPrimitiveBase
from typing import NamedTuple, Sequence
import copy
def isCat_95in10(col):
"""
hardcoded rule for identifying (integer/string) categorical column
"""
return col.value_counts().head(10).sum() / float(col.count()) > .95
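# Toy check of the 95%-in-10 rule (made-up data): 102 values drawn from three labels
# count as categorical, while 100 distinct floats do not.
_is_cat_example = isCat_95in10(pd.Series(['a', 'b', 'c'] * 34))             # True
_is_not_cat_example = isCat_95in10(pd.Series(np.arange(100, dtype=float)))  # False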
Input = pd.DataFrame
Output = pd.DataFrame
Params = NamedTuple('Params', [
('mapping', dict),
('all_columns', list),
('empty_columns', list),
('textmapping', dict)
])
## reference: https://github.com/scikit-learn/scikit-learn/issues/8136
class Label_encoder(object):
def __init__(self):
self.class_index = None
def fit_pd(self, df, cols=[]):
'''
fit all columns in the df or specific list.
generate a dict:
{feature1:{label1:1,label2:2}, feature2:{label1:1,label2:2}...}
'''
if len(cols) == 0:
cols = df.columns
self.class_index = {}
for f in cols:
uf = df[f].unique()
self.class_index[f] = {}
index = 1
for item in uf:
self.class_index[f][item] = index
index += 1
def transform_pd(self,df,cols=[]):
'''
        transform all columns in the df or a specific list from label to index; return an updated dataframe.
'''
newdf = copy.deepcopy(df)
if len(cols) == 0:
cols = df.columns
for f in cols:
if f in self.class_index:
newdf[f] = df[f].apply(lambda d: self.__update_label(f,d))
return newdf
def get_params(self):
return self.class_index
def set_params(self, textmapping):
self.class_index = textmapping
def __update_label(self,f,x):
'''
        map the label to its index; if the label is not in the dict, add it and update the dict.
'''
try:
return self.class_index[f][x]
except:
self.class_index[f][x] = max(self.class_index[f].values())+1
return self.class_index[f][x]
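# Minimal use of Label_encoder on a toy frame (hypothetical column and values):
# fit_pd assigns 1-based codes in order of first appearance, and transform_pd applies
# them, silently extending the mapping for unseen labels.
_le_demo = Label_encoder()
_le_demo.fit_pd(pd.DataFrame({'colour': ['red', 'blue', 'red']}))
# _le_demo.get_params()  == {'colour': {'red': 1, 'blue': 2}}
# _le_demo.transform_pd(pd.DataFrame({'colour': ['red', 'green']}))['colour'].tolist() == [1, 3]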
class Encoder(UnsupervisedLearnerPrimitiveBase[Input, Output, Params]):
"""
    A one-hot encoder, which
    1. is given rules or values to identify categorical columns:
        - categorical_features:
            '95in10': a column counts as categorical if 95% of its values fall into its 10 most frequent values.
        - text2int: whether non-categorical text/string columns should be mapped to integers.
        - n_limit: max number of distinct values to one-hot encode;
          remaining, less frequent values are put in a [colname]_other_ column.
    2. feed in data with set_training_data, then call fit() to tune the encoder.
    3. produce(): the input data is encoded and returned.
"""
def __repr__(self):
return "%s(%r)" % ('Encoder', self.__dict__)
def __init__(self, *, categorical_features='95in10', text2int=True, n_limit=10) -> None:
self.categorical_features = categorical_features
self.n_limit = n_limit
self.text2int = text2int
#
self.textmapping = None
#
self.mapping = None
self.all_columns = []
self.empty_columns = []
self.training_inputs = None
self.fitted = False
def __column_features(self, col, n_limit):
topn = col.dropna().unique()
if n_limit:
if col.nunique() > n_limit:
topn = col.value_counts().head(n_limit).index
return col.name, list(topn)+['other_']
def __process(self, col, categorical_features, n_limit):
if categorical_features == '95in10':
# if empty column (all missing/NaN)
if col.count() == 0:
print('Warning:',col.name,'is an empty column.')
print('The encoder will discard it.')
self.empty_columns.append(col.name)
return
# if dtype = integer
elif col.dtype.kind in np.typecodes['AllInteger']+'u':
if isCat_95in10(col):
return self.__column_features(col.astype(str), n_limit)
# if dtype = category
elif col.dtype.name == 'category':
return self.__column_features(col, n_limit)
# for the rest other than float
elif col.dtype.kind not in np.typecodes['AllFloat']:
if isCat_95in10(col):
return self.__column_features(col, n_limit)
return
def get_params(self) -> Params:
return Params(mapping=self.mapping, all_columns=self.all_columns, empty_columns=self.empty_columns, textmapping=self.textmapping)
def set_params(self, *, params: Params) -> None:
self.fitted = True
self.mapping = params.mapping
self.all_columns = params.all_columns
self.empty_columns = params.empty_columns
self.textmapping = params.textmapping
def set_training_data(self, *, inputs: Sequence[Input]):
self.training_inputs = inputs
self.fitted = False
def fit(self, *, timeout:float = None, iterations: int = None) -> None:
"""
        Needs training data from set_training_data first.
        The encoder records the categorical columns identified and the
        corresponding (top-n by occurrence) column values to one-hot
        encode later in the produce step.
"""
if self.fitted:
return
if self.training_inputs is None:
raise ValueError('Missing training(fitting) data.')
data_copy = self.training_inputs.copy()
self.all_columns = set(data_copy.columns)
if self.categorical_features == '95in10':
idict = {}
for column_name in data_copy:
col = data_copy[column_name]
p = self.__process(col, self.categorical_features, self.n_limit)
if p:
idict[p[0]] = p[1]
self.mapping = idict
#
if self.text2int:
texts = data_copy.drop(self.mapping.keys(),axis=1)
texts = texts.select_dtypes(include=[object])
le = Label_encoder()
le.fit_pd(texts)
self.textmapping = le.get_params()
#
self.fitted = True
def produce(self, *, inputs: Sequence[Input], timeout:float = None, iterations: int = None):
"""
Convert and output the input data into encoded format,
using the trained (fitted) encoder.
Notice that a [colname]_other_ column is always kept for
one-hot encoded columns.
        Missing (NaN) cells in a one-hot encoded column give a row of
        all-zero columns for the target feature.
"""
if isinstance(inputs, pd.DataFrame):
data_copy = inputs.copy()
else:
data_copy = inputs[0].copy()
data_enc = data_copy[list(self.mapping.keys())]
data_else = data_copy.drop(self.mapping.keys(),axis=1)
set_columns = set(data_copy.columns)
if set_columns != self.all_columns:
raise ValueError('Columns(features) fed at produce() differ from fitted data.')
data_enc = data_copy[list(self.mapping.keys())]
data_else = data_copy.drop(self.mapping.keys(),axis=1)
res = []
for column_name in data_enc:
col = data_enc[column_name]
col.is_copy = False
chg_t = lambda x: str(int(x)) if type(x) is not str else x
col[col.notnull()] = col[col.notnull()].apply(chg_t)
chg_v = lambda x: 'other_' if (x and x not in self.mapping[col.name]) else x
col = col.apply(chg_v)
encoded = pd.get_dummies(col, dummy_na=True, prefix=col.name)
missed = (["%s_%s"%(col.name,str(i)) for i in self.mapping[col.name] if
"%s_%s"%(col.name,str(i)) not in list(encoded.columns)])
for m in missed:
encoded[m] = 0
res.append(encoded)
data_else.drop(self.empty_columns, axis=1, inplace=True)
if self.text2int:
texts = data_else.select_dtypes([object])
le = Label_encoder()
le.set_params(self.textmapping)
data_else[texts.columns] = le.transform_pd(texts)
#for column_name in data_else:
# if data_else[column_name].dtype.kind not in np.typecodes['AllInteger']+'uf':
# data_else[column_name] = text2int(data_else[column_name])
res.append(data_else)
result = pd.concat(res, axis=1)
return result
# example
if __name__ == '__main__':
enc = Encoder()
df = | pd.DataFrame({'A':[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],'B':[1,2,3,4,5,1,2,3,4,5,1,2,3,4,5]}) | pandas.DataFrame |
from copy import deepcopy
import datetime
import inspect
import pydoc
import numpy as np
import pytest
from pandas.compat import PY37
from pandas.util._test_decorators import async_mark, skip_if_no
import pandas as pd
from pandas import Categorical, DataFrame, Series, compat, date_range, timedelta_range
import pandas._testing as tm
class TestDataFrameMisc:
@pytest.mark.parametrize("attr", ["index", "columns"])
def test_copy_index_name_checking(self, float_frame, attr):
# don't want to be able to modify the index stored elsewhere after
# making a copy
ind = getattr(float_frame, attr)
ind.name = None
cp = float_frame.copy()
getattr(cp, attr).name = "foo"
assert getattr(float_frame, attr).name is None
def test_getitem_pop_assign_name(self, float_frame):
s = float_frame["A"]
assert s.name == "A"
s = float_frame.pop("A")
assert s.name == "A"
s = float_frame.loc[:, "B"]
assert s.name == "B"
s2 = s.loc[:]
assert s2.name == "B"
def test_get_value(self, float_frame):
for idx in float_frame.index:
for col in float_frame.columns:
result = float_frame._get_value(idx, col)
expected = float_frame[col][idx]
tm.assert_almost_equal(result, expected)
def test_add_prefix_suffix(self, float_frame):
with_prefix = float_frame.add_prefix("foo#")
expected = pd.Index([f"foo#{c}" for c in float_frame.columns])
tm.assert_index_equal(with_prefix.columns, expected)
with_suffix = float_frame.add_suffix("#foo")
expected = pd.Index([f"{c}#foo" for c in float_frame.columns])
tm.assert_index_equal(with_suffix.columns, expected)
with_pct_prefix = float_frame.add_prefix("%")
expected = pd.Index([f"%{c}" for c in float_frame.columns])
tm.assert_index_equal(with_pct_prefix.columns, expected)
with_pct_suffix = float_frame.add_suffix("%")
expected = pd.Index([f"{c}%" for c in float_frame.columns])
tm.assert_index_equal(with_pct_suffix.columns, expected)
def test_get_axis(self, float_frame):
f = float_frame
assert f._get_axis_number(0) == 0
assert f._get_axis_number(1) == 1
assert f._get_axis_number("index") == 0
assert f._get_axis_number("rows") == 0
assert f._get_axis_number("columns") == 1
assert f._get_axis_name(0) == "index"
assert f._get_axis_name(1) == "columns"
assert f._get_axis_name("index") == "index"
assert f._get_axis_name("rows") == "index"
assert f._get_axis_name("columns") == "columns"
assert f._get_axis(0) is f.index
assert f._get_axis(1) is f.columns
with pytest.raises(ValueError, match="No axis named"):
f._get_axis_number(2)
with pytest.raises(ValueError, match="No axis.*foo"):
f._get_axis_name("foo")
with pytest.raises(ValueError, match="No axis.*None"):
f._get_axis_name(None)
with pytest.raises(ValueError, match="No axis named"):
f._get_axis_number(None)
def test_keys(self, float_frame):
getkeys = float_frame.keys
assert getkeys() is float_frame.columns
def test_column_contains_raises(self, float_frame):
with pytest.raises(TypeError, match="unhashable type: 'Index'"):
float_frame.columns in float_frame
def test_tab_completion(self):
# DataFrame whose columns are identifiers shall have them in __dir__.
df = pd.DataFrame([list("abcd"), list("efgh")], columns=list("ABCD"))
for key in list("ABCD"):
assert key in dir(df)
assert isinstance(df.__getitem__("A"), pd.Series)
# DataFrame whose first-level columns are identifiers shall have
# them in __dir__.
df = pd.DataFrame(
[list("abcd"), list("efgh")],
columns=pd.MultiIndex.from_tuples(list(zip("ABCD", "EFGH"))),
)
for key in list("ABCD"):
assert key in dir(df)
for key in list("EFGH"):
assert key not in dir(df)
assert isinstance(df.__getitem__("A"), pd.DataFrame)
def test_not_hashable(self):
empty_frame = DataFrame()
df = DataFrame([1])
msg = "'DataFrame' objects are mutable, thus they cannot be hashed"
with pytest.raises(TypeError, match=msg):
hash(df)
with pytest.raises(TypeError, match=msg):
hash(empty_frame)
def test_column_name_contains_unicode_surrogate(self):
# GH 25509
colname = "\ud83d"
df = DataFrame({colname: []})
# this should not crash
assert colname not in dir(df)
assert df.columns[0] == colname
def test_new_empty_index(self):
df1 = DataFrame(np.random.randn(0, 3))
df2 = DataFrame(np.random.randn(0, 3))
df1.index.name = "foo"
assert df2.index.name is None
def test_array_interface(self, float_frame):
with np.errstate(all="ignore"):
result = np.sqrt(float_frame)
assert isinstance(result, type(float_frame))
assert result.index is float_frame.index
assert result.columns is float_frame.columns
tm.assert_frame_equal(result, float_frame.apply(np.sqrt))
def test_get_agg_axis(self, float_frame):
cols = float_frame._get_agg_axis(0)
assert cols is float_frame.columns
idx = float_frame._get_agg_axis(1)
assert idx is float_frame.index
msg = r"Axis must be 0 or 1 \(got 2\)"
with pytest.raises(ValueError, match=msg):
float_frame._get_agg_axis(2)
def test_nonzero(self, float_frame, float_string_frame):
empty_frame = DataFrame()
assert empty_frame.empty
assert not float_frame.empty
assert not float_string_frame.empty
# corner case
df = DataFrame({"A": [1.0, 2.0, 3.0], "B": ["a", "b", "c"]}, index=np.arange(3))
del df["A"]
assert not df.empty
def test_iteritems(self):
df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=["a", "a", "b"])
for k, v in df.items():
assert isinstance(v, DataFrame._constructor_sliced)
def test_items(self):
# GH 17213, GH 13918
cols = ["a", "b", "c"]
df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=cols)
for c, (k, v) in zip(cols, df.items()):
assert c == k
assert isinstance(v, Series)
assert (df[k] == v).all()
def test_iter(self, float_frame):
assert tm.equalContents(list(float_frame), float_frame.columns)
def test_iterrows(self, float_frame, float_string_frame):
for k, v in float_frame.iterrows():
exp = float_frame.loc[k]
tm.assert_series_equal(v, exp)
for k, v in float_string_frame.iterrows():
exp = float_string_frame.loc[k]
tm.assert_series_equal(v, exp)
def test_iterrows_iso8601(self):
# GH 19671
s = DataFrame(
{
"non_iso8601": ["M1701", "M1802", "M1903", "M2004"],
"iso8601": date_range("2000-01-01", periods=4, freq="M"),
}
)
for k, v in s.iterrows():
exp = s.loc[k]
tm.assert_series_equal(v, exp)
def test_iterrows_corner(self):
# gh-12222
df = DataFrame(
{
"a": [datetime.datetime(2015, 1, 1)],
"b": [None],
"c": [None],
"d": [""],
"e": [[]],
"f": [set()],
"g": [{}],
}
)
expected = Series(
[datetime.datetime(2015, 1, 1), None, None, "", [], set(), {}],
index=list("abcdefg"),
name=0,
dtype="object",
)
_, result = next(df.iterrows())
tm.assert_series_equal(result, expected)
def test_itertuples(self, float_frame):
for i, tup in enumerate(float_frame.itertuples()):
s = DataFrame._constructor_sliced(tup[1:])
s.name = tup[0]
expected = float_frame.iloc[i, :].reset_index(drop=True)
tm.assert_series_equal(s, expected)
df = DataFrame(
{"floats": np.random.randn(5), "ints": range(5)}, columns=["floats", "ints"]
)
for tup in df.itertuples(index=False):
assert isinstance(tup[1], int)
df = | DataFrame(data={"a": [1, 2, 3], "b": [4, 5, 6]}) | pandas.DataFrame |
import ast
import json
import os
import sys
import uuid
import lxml
import networkx as nx
import pandas as pd
import geopandas as gpd
import pytest
from pandas.testing import assert_frame_equal, assert_series_equal
from shapely.geometry import LineString, Polygon, Point
from genet.core import Network
from genet.inputs_handler import matsim_reader
from tests.test_outputs_handler_matsim_xml_writer import network_dtd, schedule_dtd
from genet.schedule_elements import Route, Service, Schedule
from genet.utils import plot, spatial
from genet.inputs_handler import read
from tests.fixtures import assert_semantically_equal, route, stop_epsg_27700, network_object_from_test_data, \
full_fat_default_config_path, correct_schedule, vehicle_definitions_config_path
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
pt2matsim_network_test_file = os.path.abspath(
os.path.join(os.path.dirname(__file__), "test_data", "matsim", "network.xml"))
pt2matsim_schedule_file = os.path.abspath(
os.path.join(os.path.dirname(__file__), "test_data", "matsim", "schedule.xml"))
puma_network_test_file = os.path.abspath(
os.path.join(os.path.dirname(__file__), "test_data", "puma", "network.xml"))
puma_schedule_test_file = os.path.abspath(
os.path.join(os.path.dirname(__file__), "test_data", "puma", "schedule.xml"))
simplified_network = os.path.abspath(
os.path.join(os.path.dirname(__file__), "test_data", "simplified_network", "network.xml"))
simplified_schedule = os.path.abspath(
os.path.join(os.path.dirname(__file__), "test_data", "simplified_network", "schedule.xml"))
network_link_attrib_text_missing = os.path.abspath(
os.path.join(os.path.dirname(__file__), "test_data", "matsim", "network_link_attrib_text_missing.xml"))
@pytest.fixture()
def network1():
n1 = Network('epsg:27700')
n1.add_node('101982',
{'id': '101982',
'x': '528704.1425925883',
'y': '182068.78193707118',
'lon': -0.14625948709424305,
'lat': 51.52287873323954,
's2_id': 5221390329378179879})
n1.add_node('101986',
{'id': '101986',
'x': '528835.203274008',
'y': '182006.27331298392',
'lon': -0.14439428709377497,
'lat': 51.52228713323965,
's2_id': 5221390328605860387})
n1.add_link('0', '101982', '101986',
attribs={'id': '0',
'from': '101982',
'to': '101986',
'freespeed': 4.166666666666667,
'capacity': 600.0,
'permlanes': 1.0,
'oneway': '1',
'modes': ['car'],
's2_from': 5221390329378179879,
's2_to': 5221390328605860387,
'length': 52.765151087870265,
'attributes': {'osm:way:access': {'name': 'osm:way:access',
'class': 'java.lang.String',
'text': 'permissive'},
'osm:way:highway': {'name': 'osm:way:highway',
'class': 'java.lang.String',
'text': 'unclassified'},
'osm:way:id': {'name': 'osm:way:id',
'class': 'java.lang.Long',
'text': '26997928'},
'osm:way:name': {'name': 'osm:way:name',
'class': 'java.lang.String',
'text': 'Brunswick Place'}}})
return n1
@pytest.fixture()
def network2():
n2 = Network('epsg:4326')
n2.add_node('101982',
{'id': '101982',
'x': -0.14625948709424305,
'y': 51.52287873323954,
'lon': -0.14625948709424305,
'lat': 51.52287873323954,
's2_id': 5221390329378179879})
n2.add_node('101990',
{'id': '101990',
'x': -0.14770188709624754,
'y': 51.5205729332399,
'lon': -0.14770188709624754,
'lat': 51.5205729332399,
's2_id': 5221390304444511271})
n2.add_link('0', '101982', '101990',
attribs={'id': '0',
'from': '101982',
'to': '101990',
'freespeed': 4.166666666666667,
'capacity': 600.0,
'permlanes': 1.0,
'oneway': '1',
'modes': ['car'],
's2_from': 5221390329378179879,
's2_to': 5221390304444511271,
'length': 52.765151087870265,
'attributes': {'osm:way:access': {'name': 'osm:way:access',
'class': 'java.lang.String',
'text': 'permissive'},
'osm:way:highway': {'name': 'osm:way:highway',
'class': 'java.lang.String',
'text': 'unclassified'},
'osm:way:id': {'name': 'osm:way:id',
'class': 'java.lang.Long',
'text': '26997928'},
'osm:way:name': {'name': 'osm:way:name',
'class': 'java.lang.String',
'text': 'Brunswick Place'}}})
return n2
def test_network_graph_initiates_as_not_simplififed():
n = Network('epsg:27700')
assert not n.graph.graph['simplified']
def test__repr__shows_graph_info_and_schedule_info():
n = Network('epsg:4326')
assert 'instance at' in n.__repr__()
assert 'graph' in n.__repr__()
assert 'schedule' in n.__repr__()
def test__str__shows_info():
n = Network('epsg:4326')
assert 'Graph info' in n.__str__()
assert 'Schedule info' in n.__str__()
def test_reproject_changes_x_y_values_for_all_nodes(network1):
network1.reproject('epsg:4326')
nodes = dict(network1.nodes())
correct_nodes = {
'101982': {'id': '101982', 'x': -0.14625948709424305, 'y': 51.52287873323954, 'lon': -0.14625948709424305,
'lat': 51.52287873323954, 's2_id': 5221390329378179879},
'101986': {'id': '101986', 'x': -0.14439428709377497, 'y': 51.52228713323965, 'lon': -0.14439428709377497,
'lat': 51.52228713323965, 's2_id': 5221390328605860387}}
target_change_log = pd.DataFrame(
{'timestamp': {3: '2020-07-09 19:50:51', 4: '2020-07-09 19:50:51'}, 'change_event': {3: 'modify', 4: 'modify'},
'object_type': {3: 'node', 4: 'node'}, 'old_id': {3: '101982', 4: '101986'},
'new_id': {3: '101982', 4: '101986'}, 'old_attributes': {
3: "{'id': '101982', 'x': '528704.1425925883', 'y': '182068.78193707118', 'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879}",
4: "{'id': '101986', 'x': '528835.203274008', 'y': '182006.27331298392', 'lon': -0.14439428709377497, 'lat': 51.52228713323965, 's2_id': 5221390328605860387}"},
'new_attributes': {
3: "{'id': '101982', 'x': -0.14625948709424305, 'y': 51.52287873323954, 'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879}",
4: "{'id': '101986', 'x': -0.14439428709377497, 'y': 51.52228713323965, 'lon': -0.14439428709377497, 'lat': 51.52228713323965, 's2_id': 5221390328605860387}"},
'diff': {3: [('change', 'x', ('528704.1425925883', -0.14625948709424305)),
('change', 'y', ('182068.78193707118', 51.52287873323954))],
4: [('change', 'x', ('528835.203274008', -0.14439428709377497)),
('change', 'y', ('182006.27331298392', 51.52228713323965))]}}
)
assert_semantically_equal(nodes, correct_nodes)
for i in [3, 4]:
assert_semantically_equal(ast.literal_eval(target_change_log.loc[i, 'old_attributes']),
ast.literal_eval(network1.change_log.loc[i, 'old_attributes']))
assert_semantically_equal(ast.literal_eval(target_change_log.loc[i, 'new_attributes']),
ast.literal_eval(network1.change_log.loc[i, 'new_attributes']))
cols_to_compare = ['change_event', 'object_type', 'old_id', 'new_id', 'diff']
assert_frame_equal(network1.change_log[cols_to_compare].tail(2), target_change_log[cols_to_compare],
check_dtype=False)
def test_reproject_delegates_reprojection_to_schedules_own_method(network1, route, mocker):
mocker.patch.object(Schedule, 'reproject')
network1.schedule = Schedule(epsg='epsg:27700', services=[Service(id='id', routes=[route])])
network1.reproject('epsg:4326')
network1.schedule.reproject.assert_called_once_with('epsg:4326', 1)
def test_reproject_updates_graph_crs(network1):
network1.reproject('epsg:4326')
assert network1.graph.graph['crs'] == {'init': 'epsg:4326'}
def test_reprojecting_links_with_geometries():
n = Network('epsg:27700')
n.add_nodes({'A': {'x': -82514.72274, 'y': 220772.02798},
'B': {'x': -82769.25894, 'y': 220773.0637}})
n.add_links({'1': {'from': 'A', 'to': 'B',
'geometry': LineString([(-82514.72274, 220772.02798),
(-82546.23894, 220772.88254),
(-82571.87107, 220772.53339),
(-82594.92709, 220770.68385),
(-82625.33255, 220770.45579),
(-82631.26842, 220770.40158),
(-82669.7309, 220770.04349),
(-82727.94946, 220770.79793),
(-82757.38528, 220771.75412),
(-82761.82425, 220771.95614),
(-82769.25894, 220773.0637)])}})
n.reproject('epsg:2157')
geometry_coords = list(n.link('1')['geometry'].coords)
assert round(geometry_coords[0][0], 7) == 532006.5605980
assert round(geometry_coords[0][1], 7) == 547653.3751768
assert round(geometry_coords[-1][0], 7) == 531753.4315189
assert round(geometry_coords[-1][1], 7) == 547633.5224837
def test_adding_the_same_networks():
n_left = Network('epsg:27700')
n_left.add_node('1', {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118,
'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879})
n_left.add_node('2', {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392,
'lon': -0.14439428709377497, 'lat': 51.52228713323965, 's2_id': 5221390328605860387})
n_left.add_link('1', '1', '2', attribs={'modes': ['walk']})
n_right = Network('epsg:27700')
n_right.add_node('1', {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118,
'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879})
n_right.add_node('2', {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392,
'lon': -0.14439428709377497, 'lat': 51.52228713323965, 's2_id': 5221390328605860387})
n_right.add_link('1', '1', '2', attribs={'modes': ['walk']})
n_left.add(n_right)
assert_semantically_equal(dict(n_left.nodes()), {
'1': {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118, 'lon': -0.14625948709424305,
'lat': 51.52287873323954, 's2_id': 5221390329378179879},
'2': {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392, 'lon': -0.14439428709377497,
'lat': 51.52228713323965, 's2_id': 5221390328605860387}})
assert_semantically_equal(dict(n_left.links()), {'1': {'modes': ['walk'], 'from': '1', 'to': '2', 'id': '1'}})
def test_adding_the_same_networks_but_with_differing_projections():
n_left = Network('epsg:27700')
n_left.add_node('1', {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118,
'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879})
n_left.add_node('2', {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392,
'lon': -0.14439428709377497, 'lat': 51.52228713323965, 's2_id': 5221390328605860387})
n_left.add_link('1', '1', '2', attribs={'modes': ['walk']})
n_right = Network('epsg:27700')
n_right.add_node('1', {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118,
'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879})
n_right.add_node('2', {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392,
'lon': -0.14439428709377497, 'lat': 51.52228713323965, 's2_id': 5221390328605860387})
n_right.add_link('1', '1', '2', attribs={'modes': ['walk']})
n_right.reproject('epsg:4326')
n_left.add(n_right)
assert_semantically_equal(dict(n_left.nodes()), {
'1': {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118, 'lon': -0.14625948709424305,
'lat': 51.52287873323954, 's2_id': 5221390329378179879},
'2': {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392, 'lon': -0.14439428709377497,
'lat': 51.52228713323965, 's2_id': 5221390328605860387}})
assert_semantically_equal(dict(n_left.links()), {'1': {'modes': ['walk'], 'from': '1', 'to': '2', 'id': '1'}})
def test_adding_networks_with_clashing_node_ids():
n_left = Network('epsg:27700')
n_left.add_node('1', {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118,
'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879})
n_left.add_node('2', {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392,
'lon': -0.14439428709377497, 'lat': 51.52228713323965, 's2_id': 5221390328605860387})
n_left.add_link('1', '1', '2', attribs={'modes': ['walk']})
n_right = Network('epsg:27700')
n_right.add_node('10', {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118,
'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879})
n_right.add_node('20', {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392,
'lon': -0.14439428709377497, 'lat': 51.52228713323965, 's2_id': 5221390328605860387})
n_right.add_link('1', '10', '20', attribs={'modes': ['walk']})
n_left.add(n_right)
assert_semantically_equal(dict(n_left.nodes()), {
'1': {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118, 'lon': -0.14625948709424305,
'lat': 51.52287873323954, 's2_id': 5221390329378179879},
'2': {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392, 'lon': -0.14439428709377497,
'lat': 51.52228713323965, 's2_id': 5221390328605860387}})
assert_semantically_equal(dict(n_left.links()), {'1': {'modes': ['walk'], 'from': '1', 'to': '2', 'id': '1'}})
def test_adding_networks_with_clashing_link_ids():
n_left = Network('epsg:27700')
n_left.add_node('1', {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118,
'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879})
n_left.add_node('2', {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392,
'lon': -0.14439428709377497, 'lat': 51.52228713323965, 's2_id': 5221390328605860387})
n_left.add_link('1', '1', '2', attribs={'modes': ['walk']})
n_right = Network('epsg:27700')
n_right.add_node('1', {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118,
'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879})
n_right.add_node('2', {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392,
'lon': -0.14439428709377497, 'lat': 51.52228713323965, 's2_id': 5221390328605860387})
n_right.add_link('10', '1', '2', attribs={'modes': ['walk']})
n_left.add(n_right)
assert_semantically_equal(dict(n_left.nodes()), {
'1': {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118, 'lon': -0.14625948709424305,
'lat': 51.52287873323954, 's2_id': 5221390329378179879},
'2': {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392, 'lon': -0.14439428709377497,
'lat': 51.52228713323965, 's2_id': 5221390328605860387}})
assert_semantically_equal(dict(n_left.links()), {'1': {'modes': ['walk'], 'from': '1', 'to': '2', 'id': '1'}})
def test_adding_networks_with_clashing_multiindices():
n_left = Network('epsg:27700')
n_left.add_node('1', {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118,
'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879})
n_left.add_node('2', {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392,
'lon': -0.14439428709377497, 'lat': 51.52228713323965, 's2_id': 5221390328605860387})
n_left.add_link('1', '1', '2', 0, attribs={'modes': ['walk']})
n_right = Network('epsg:27700')
n_left.add_node('1', {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118,
'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879})
n_left.add_node('2', {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392,
'lon': -0.14439428709377497, 'lat': 51.52228713323965, 's2_id': 5221390328605860387})
n_left.add_link('1', '1', '2', 0, attribs={'modes': ['walk', 'bike']})
n_left.add(n_right)
assert len(list(n_left.nodes())) == 2
assert n_left.node('1') == {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118,
'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879}
assert n_left.node('2') == {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392,
'lon': -0.14439428709377497, 'lat': 51.52228713323965, 's2_id': 5221390328605860387}
assert len(n_left.link_id_mapping) == 2
assert n_left.link('1') == {'modes': ['walk'], 'from': '1', 'to': '2', 'id': '1'}
assert n_left.graph['1']['2'][0] == {'modes': ['walk'], 'from': '1', 'to': '2', 'id': '1'}
def test_adding_disjoint_networks_with_unique_ids():
n_left = Network('epsg:27700')
n_left.add_node('1', {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118,
'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879})
n_left.add_node('2', {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392,
'lon': -0.14439428709377497, 'lat': 51.52228713323965, 's2_id': 5221390328605860387})
n_left.add_link('1', '1', '2', attribs={'modes': ['walk']})
n_right = Network('epsg:27700')
n_right.add_node('10', {'id': '1', 'x': 1, 'y': 1,
'lon': 1, 'lat': 1, 's2_id': 1})
n_right.add_node('20', {'id': '2', 'x': 1, 'y': 1,
'lon': 1, 'lat': 1, 's2_id': 2})
n_right.add_link('100', '10', '20', attribs={'modes': ['walk']})
n_left.add(n_right)
assert_semantically_equal(dict(n_left.nodes()), {'10': {'id': '1', 'x': 1, 'y': 1, 'lon': 1, 'lat': 1, 's2_id': 1},
'20': {'id': '2', 'x': 1, 'y': 1, 'lon': 1, 'lat': 1, 's2_id': 2},
'1': {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118,
'lon': -0.14625948709424305, 'lat': 51.52287873323954,
's2_id': 5221390329378179879},
'2': {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392,
'lon': -0.14439428709377497, 'lat': 51.52228713323965,
's2_id': 5221390328605860387}})
assert_semantically_equal(dict(n_left.links()), {'100': {'modes': ['walk'], 'from': '10', 'to': '20', 'id': '100'},
'1': {'modes': ['walk'], 'from': '1', 'to': '2', 'id': '1'}})
def test_adding_disjoint_networks_with_clashing_ids():
n_left = Network('epsg:27700')
n_left.add_node('1', {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118,
'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879})
n_left.add_node('2', {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392,
'lon': -0.14439428709377497, 'lat': 51.52228713323965, 's2_id': 5221390328605860387})
n_left.add_link('1', '1', '2', attribs={'modes': ['walk']})
n_right = Network('epsg:27700')
n_right.add_node('1', {'id': '1', 'x': 1, 'y': 1,
'lon': 1, 'lat': 1, 's2_id': 1})
n_right.add_node('2', {'id': '2', 'x': 1, 'y': 1,
'lon': 1, 'lat': 1, 's2_id': 2})
n_right.add_link('1', '1', '2', attribs={'modes': ['walk']})
n_left.add(n_right)
assert len(list(n_left.nodes())) == 4
assert n_left.node('1') == {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118,
'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879}
assert n_left.node('2') == {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392,
'lon': -0.14439428709377497, 'lat': 51.52228713323965, 's2_id': 5221390328605860387}
assert len(n_left.link_id_mapping) == 2
assert n_left.link('1') == {'modes': ['walk'], 'from': '1', 'to': '2', 'id': '1'}
def test_adding_simplified_network_and_not_throws_error():
n = Network('epsg:2770')
m = Network('epsg:2770')
m.graph.graph['simplified'] = True
with pytest.raises(RuntimeError) as error_info:
n.add(m)
assert "cannot add" in str(error_info.value)
def test_print_shows_info(mocker):
mocker.patch.object(Network, 'info')
n = Network('epsg:27700')
n.print()
n.info.assert_called_once()
def test_plot_delegates_to_util_plot_plot_graph_routes(mocker):
mocker.patch.object(plot, 'plot_graph_routes')
n = Network('epsg:27700')
n.plot()
plot.plot_graph_routes.assert_called_once()
def test_plot_graph_delegates_to_util_plot_plot_graph(mocker):
mocker.patch.object(plot, 'plot_graph')
n = Network('epsg:27700')
n.plot_graph()
plot.plot_graph.assert_called_once()
def test_plot_schedule_delegates_to_util_plot_plot_non_routed_schedule_graph(mocker, network_object_from_test_data):
mocker.patch.object(plot, 'plot_non_routed_schedule_graph')
n = network_object_from_test_data
n.plot_schedule()
plot.plot_non_routed_schedule_graph.assert_called_once()
def test_attempt_to_simplify_already_simplified_network_throws_error():
n = Network('epsg:27700')
n.graph.graph["simplified"] = True
with pytest.raises(RuntimeError) as error_info:
n.simplify()
assert "cannot simplify" in str(error_info.value)
def test_simplifing_puma_network_results_in_correct_record_of_removed_links_and_expected_graph_data():
n = read.read_matsim(path_to_network=puma_network_test_file, epsg='epsg:27700',
path_to_schedule=puma_schedule_test_file)
link_ids_pre_simplify = set(dict(n.links()).keys())
n.simplify()
assert n.is_simplified()
link_ids_post_simplify = set(dict(n.links()).keys())
assert link_ids_post_simplify & link_ids_pre_simplify
new_links = link_ids_post_simplify - link_ids_pre_simplify
deleted_links = link_ids_pre_simplify - link_ids_post_simplify
assert set(n.link_simplification_map.keys()) == deleted_links
assert set(n.link_simplification_map.values()) == new_links
assert (set(n.link_id_mapping.keys()) & new_links) == new_links
report = n.generate_validation_report()
assert report['routing']['services_have_routes_in_the_graph']
assert report['schedule']['schedule_level']['is_valid_schedule']
def test_simplified_network_saves_to_correct_dtds(tmpdir, network_dtd, schedule_dtd):
n = read.read_matsim(path_to_network=puma_network_test_file, epsg='epsg:27700',
path_to_schedule=puma_schedule_test_file)
n.simplify()
n.write_to_matsim(tmpdir)
generated_network_file_path = os.path.join(tmpdir, 'network.xml')
xml_obj = lxml.etree.parse(generated_network_file_path)
assert network_dtd.validate(xml_obj), \
'Doc generated at {} is not valid against DTD due to {}'.format(generated_network_file_path,
network_dtd.error_log.filter_from_errors())
generated_schedule_file_path = os.path.join(tmpdir, 'schedule.xml')
xml_obj = lxml.etree.parse(generated_schedule_file_path)
assert schedule_dtd.validate(xml_obj), \
'Doc generated at {} is not valid against DTD due to {}'.format(generated_network_file_path,
schedule_dtd.error_log.filter_from_errors())
def test_simplifying_network_with_multi_edges_resulting_in_multi_paths():
n = Network('epsg:27700')
n.add_nodes({
'n_-1': {'x': -1, 'y': -1, 's2_id': -1},
'n_0': {'x': 0, 'y': 0, 's2_id': 0},
'n_1': {'x': 1, 'y': 1, 's2_id': 1},
'n_2': {'x': 2, 'y': 2, 's2_id': 2},
'n_3': {'x': 3, 'y': 3, 's2_id': 3},
'n_4': {'x': 4, 'y': 4, 's2_id': 4},
'n_5': {'x': 5, 'y': 5, 's2_id': 5},
'n_6': {'x': 6, 'y': 5, 's2_id': 6},
})
n.add_links({
'l_-1': {'from': 'n_-1', 'to': 'n_1', 'freespeed': 1, 'capacity': 1, 'permlanes': 1, 'length': 1,
'modes': {'car'}},
'l_0': {'from': 'n_0', 'to': 'n_1', 'freespeed': 1, 'capacity': 1, 'permlanes': 1, 'length': 1,
'modes': {'car'}},
'l_1': {'from': 'n_1', 'to': 'n_2', 'freespeed': 1, 'capacity': 1, 'permlanes': 1, 'length': 1,
'modes': {'car'}},
'l_2': {'from': 'n_1', 'to': 'n_2', 'freespeed': 1, 'capacity': 1, 'permlanes': 1, 'length': 1,
'modes': {'car'}},
'l_3': {'from': 'n_2', 'to': 'n_3', 'freespeed': 1, 'capacity': 1, 'permlanes': 1, 'length': 1,
'modes': {'car'}},
'l_4': {'from': 'n_2', 'to': 'n_3', 'freespeed': 1, 'capacity': 1, 'permlanes': 1, 'length': 1,
'modes': {'car'}},
'l_5': {'from': 'n_3', 'to': 'n_4', 'freespeed': 1, 'capacity': 1, 'permlanes': 1, 'length': 1,
'modes': {'car'}},
'l_6': {'from': 'n_3', 'to': 'n_4', 'freespeed': 1, 'capacity': 1, 'permlanes': 1, 'length': 1,
'modes': {'car'}},
'l_7': {'from': 'n_4', 'to': 'n_5', 'freespeed': 1, 'capacity': 1, 'permlanes': 1, 'length': 1,
'modes': {'car'}},
'l_8': {'from': 'n_4', 'to': 'n_6', 'freespeed': 1, 'capacity': 1, 'permlanes': 1, 'length': 1,
'modes': {'car'}}
})
n.simplify()
assert set(n.link_simplification_map) == {'l_4', 'l_1', 'l_5', 'l_3', 'l_6', 'l_2'}
def test_reading_back_simplified_network():
# simplified networks have additional geometry attribute and some of their attributes are composite, e.g. links
# now refer to a number of osm ways each with a unique id
n = read.read_matsim(path_to_network=simplified_network, epsg='epsg:27700',
path_to_schedule=simplified_schedule)
number_of_simplified_links = 659
links_with_geometry = n.extract_links_on_edge_attributes(conditions={'geometry': lambda x: True})
assert len(links_with_geometry) == number_of_simplified_links
for link in links_with_geometry:
attribs = n.link(link)
if 'attributes' in attribs:
assert not 'geometry' in attribs['attributes']
for k, v in attribs['attributes'].items():
if isinstance(v['text'], str):
assert not ',' in v['text']
def test_network_with_missing_link_attribute_elem_text_is_read_and_able_to_save_again(tmpdir):
n = read.read_matsim(path_to_network=network_link_attrib_text_missing, epsg='epsg:27700')
n.write_to_matsim(tmpdir)
def test_node_attribute_data_under_key_returns_correct_pd_series_with_nested_keys():
n = Network('epsg:27700')
n.add_node(1, {'a': {'b': 1}})
n.add_node(2, {'a': {'b': 4}})
output_series = n.node_attribute_data_under_key(key={'a': 'b'})
assert_series_equal(output_series, pd.Series({1: 1, 2: 4}))
def test_node_attribute_data_under_key_returns_correct_pd_series_with_flat_keys():
n = Network('epsg:27700')
n.add_node(1, {'b': 1})
n.add_node(2, {'b': 4})
output_series = n.node_attribute_data_under_key(key='b')
assert_series_equal(output_series, pd.Series({1: 1, 2: 4}))
def test_node_attribute_data_under_keys(network1):
df = network1.node_attribute_data_under_keys(['x', 'y'])
df_to_compare = pd.DataFrame({'x': {'101982': '528704.1425925883', '101986': '528835.203274008'},
'y': {'101982': '182068.78193707118', '101986': '182006.27331298392'}})
assert_frame_equal(df, df_to_compare)
def test_node_attribute_data_under_keys_with_named_index(network1):
df = network1.node_attribute_data_under_keys(['x', 'y'], index_name='index')
assert df.index.name == 'index'
def test_node_attribute_data_under_keys_generates_key_for_nested_data(network1):
network1.add_node('1', {'key': {'nested_value': {'more_nested': 4}}})
df = network1.node_attribute_data_under_keys([{'key': {'nested_value': 'more_nested'}}])
assert isinstance(df, pd.DataFrame)
assert 'key::nested_value::more_nested' in df.columns
def test_node_attribute_data_under_keys_returns_dataframe_with_one_col_if_passed_one_key(network1):
df = network1.node_attribute_data_under_keys(['x'], index_name='index')
assert isinstance(df, pd.DataFrame)
assert len(df.columns) == 1
def test_link_attribute_data_under_key_returns_correct_pd_series_with_nested_keys():
n = Network('epsg:27700')
n.add_link('0', 1, 2, attribs={'a': {'b': 1}})
n.add_link('1', 1, 2, attribs={'a': {'b': 4}})
output_series = n.link_attribute_data_under_key(key={'a': 'b'})
assert_series_equal(output_series, pd.Series({'0': 1, '1': 4}))
def test_link_attribute_data_under_key_returns_correct_pd_series_with_flat_keys():
n = Network('epsg:27700')
n.add_link('0', 1, 2, attribs={'b': 1})
n.add_link('1', 1, 2, attribs={'b': 4})
output_series = n.link_attribute_data_under_key(key='b')
assert_series_equal(output_series, pd.Series({'0': 1, '1': 4}))
def test_link_attribute_data_under_keys(network1):
df = network1.link_attribute_data_under_keys(['modes', 'freespeed', 'capacity', 'permlanes'])
df_to_compare = pd.DataFrame({'modes': {'0': ['car']}, 'freespeed': {'0': 4.166666666666667},
'capacity': {'0': 600.0}, 'permlanes': {'0': 1.0}})
assert_frame_equal(df, df_to_compare)
def test_link_attribute_data_under_keys_with_named_index(network1):
df = network1.link_attribute_data_under_keys(['modes', 'freespeed', 'capacity', 'permlanes'], index_name='index')
assert df.index.name == 'index'
def test_link_attribute_data_under_keys_returns_dataframe_with_one_col_if_passed_one_key(network1):
df = network1.link_attribute_data_under_keys(['modes'])
assert isinstance(df, pd.DataFrame)
assert len(df.columns) == 1
def test_link_attribute_data_under_keys_generates_key_for_nested_data(network1):
df = network1.link_attribute_data_under_keys([{'attributes': {'osm:way:access': 'text'}}])
assert isinstance(df, pd.DataFrame)
assert 'attributes::osm:way:access::text' in df.columns
def test_add_node_adds_node_to_graph_with_attribs():
n = Network('epsg:27700')
n.add_node(1, {'a': 1})
assert n.graph.has_node(1)
assert n.node(1) == {'a': 1}
def test_add_node_adds_node_to_graph_without_attribs():
n = Network('epsg:27700')
n.add_node(1)
assert n.node(1) == {}
assert n.graph.has_node(1)
def test_add_multiple_nodes():
n = Network('epsg:27700')
reindexing_dict, actual_nodes_added = n.add_nodes({1: {'x': 1, 'y': 2}, 2: {'x': 2, 'y': 2}})
assert n.graph.has_node(1)
assert n.node(1) == {'x': 1, 'y': 2, 'id': 1}
assert n.graph.has_node(2)
assert n.node(2) == {'x': 2, 'y': 2, 'id': 2}
assert reindexing_dict == {}
def test_add_nodes_with_clashing_ids():
n = Network('epsg:27700')
n.add_node(1, {})
reindexing_dict, actual_nodes_added = n.add_nodes({1: {'x': 1, 'y': 2}, 2: {'x': 2, 'y': 2}})
assert n.graph.has_node(1)
assert n.node(1) == {}
assert n.graph.has_node(2)
assert n.node(2) == {'x': 2, 'y': 2, 'id': 2}
assert 1 in reindexing_dict
assert n.graph.has_node(reindexing_dict[1])
assert n.node(reindexing_dict[1]) == {'x': 1, 'y': 2, 'id': reindexing_dict[1]}
def test_add_nodes_with_multiple_clashing_ids():
n = Network('epsg:27700')
n.add_node(1, {})
n.add_node(2, {})
assert n.graph.has_node(1)
assert n.node(1) == {}
assert n.graph.has_node(2)
assert n.node(2) == {}
reindexing_dict, actual_nodes_added = n.add_nodes({1: {'x': 1, 'y': 2}, 2: {'x': 2, 'y': 2}})
assert 1 in reindexing_dict
assert n.graph.has_node(reindexing_dict[1])
assert n.node(reindexing_dict[1]) == {'x': 1, 'y': 2, 'id': reindexing_dict[1]}
assert 2 in reindexing_dict
assert n.graph.has_node(reindexing_dict[2])
assert n.node(reindexing_dict[2]) == {'x': 2, 'y': 2, 'id': reindexing_dict[2]}
def test_add_edge_generates_a_link_id_and_delegated_to_add_link_id(mocker):
mocker.patch.object(Network, 'add_link')
mocker.patch.object(Network, 'generate_index_for_edge', return_value='12345')
n = Network('epsg:27700')
n.add_edge(1, 2, attribs={'a': 1})
Network.generate_index_for_edge.assert_called_once()
Network.add_link.assert_called_once_with('12345', 1, 2, None, {'a': 1}, False)
def test_add_edge_generates_a_link_id_with_specified_multiidx(mocker):
mocker.patch.object(Network, 'add_link')
mocker.patch.object(Network, 'generate_index_for_edge', return_value='12345')
n = Network('epsg:27700')
n.add_edge(1, 2, multi_edge_idx=10, attribs={'a': 1})
Network.generate_index_for_edge.assert_called_once()
Network.add_link.assert_called_once_with('12345', 1, 2, 10, {'a': 1}, False)
def test_adding_multiple_edges():
n = Network('epsg:27700')
n.add_edges([{'from': 1, 'to': 2}, {'from': 2, 'to': 3}])
assert n.graph.has_edge(1, 2)
assert n.graph.has_edge(2, 3)
assert '0' in n.link_id_mapping
assert '1' in n.link_id_mapping
if n.link_id_mapping['0'] == {'from': 1, 'to': 2, 'multi_edge_idx': 0}:
assert n.link_id_mapping['1'] == {'from': 2, 'to': 3, 'multi_edge_idx': 0}
elif n.link_id_mapping['1'] == {'from': 1, 'to': 2, 'multi_edge_idx': 0}:
assert n.link_id_mapping['0'] == {'from': 2, 'to': 3, 'multi_edge_idx': 0}
else:
raise AssertionError()
def test_adding_multiple_edges_between_same_nodes():
n = Network('epsg:27700')
n.add_edges([{'from': 1, 'to': 2}, {'from': 1, 'to': 2}, {'from': 1, 'to': 2}, {'from': 2, 'to': 3}])
assert n.graph.has_edge(1, 2)
assert n.graph.number_of_edges(1, 2) == 3
assert n.graph.has_edge(2, 3)
assert len(n.link_id_mapping) == 4
def test_add_link_adds_edge_to_graph_with_attribs():
n = Network('epsg:27700')
n.add_link('0', 1, 2, attribs={'a': 1})
assert n.graph.has_edge(1, 2)
assert '0' in n.link_id_mapping
assert n.edge(1, 2) == {0: {'a': 1, 'from': 1, 'id': '0', 'to': 2}}
def test_add_link_adds_edge_to_graph_without_attribs():
n = Network('epsg:27700')
n.add_link('0', 1, 2)
    assert n.graph.has_edge(1, 2)
assert '0' in n.link_id_mapping
assert n.link_id_mapping['0'] == {'from': 1, 'to': 2, 'multi_edge_idx': 0}
def test_adding_multiple_links():
n = Network('epsg:27700')
n.add_links({'0': {'from': 1, 'to': 2}, '1': {'from': 2, 'to': 3}})
assert n.graph.has_edge(1, 2)
assert n.graph.has_edge(2, 3)
assert '0' in n.link_id_mapping
assert '1' in n.link_id_mapping
assert n.link_id_mapping['0'] == {'from': 1, 'to': 2, 'multi_edge_idx': 0}
assert n.link_id_mapping['1'] == {'from': 2, 'to': 3, 'multi_edge_idx': 0}
def test_adding_multiple_links_with_id_clashes():
n = Network('epsg:27700')
n.add_link('0', 10, 20)
assert '0' in n.link_id_mapping
reindexing_dict, links_and_attribs = n.add_links({'0': {'from': 1, 'to': 2}, '1': {'from': 2, 'to': 3}})
assert '1' in n.link_id_mapping
assert '0' in reindexing_dict
assert len(n.link_id_mapping) == 3
assert_semantically_equal(links_and_attribs[reindexing_dict['0']], {'from': 1, 'to': 2, 'id': reindexing_dict['0']})
assert_semantically_equal(links_and_attribs['1'], {'from': 2, 'to': 3, 'id': '1'})
def test_adding_multiple_links_with_multiple_id_clashes():
n = Network('epsg:27700')
n.add_link('0', 10, 20)
n.add_link('1', 10, 20)
assert '0' in n.link_id_mapping
assert '1' in n.link_id_mapping
reindexing_dict, links_and_attribs = n.add_links({'0': {'from': 1, 'to': 2}, '1': {'from': 2, 'to': 3}})
assert '0' in reindexing_dict
assert '1' in reindexing_dict
assert len(n.link_id_mapping) == 4
assert_semantically_equal(links_and_attribs[reindexing_dict['0']], {'from': 1, 'to': 2, 'id': reindexing_dict['0']})
assert_semantically_equal(links_and_attribs[reindexing_dict['1']], {'from': 2, 'to': 3, 'id': reindexing_dict['1']})
def test_adding_loads_of_multiple_links_between_same_nodes():
n = Network('epsg:27700')
reindexing_dict, links_and_attribs = n.add_links({i: {'from': 1, 'to': 2} for i in range(10)})
assert_semantically_equal(links_and_attribs, {i: {'from': 1, 'to': 2, 'id': i} for i in range(10)})
assert_semantically_equal(n.link_id_mapping, {i: {'from': 1, 'to': 2, 'multi_edge_idx': i} for i in range(10)})
def test_adding_multiple_links_with_multi_idx_clashes():
n = Network('epsg:27700')
n.add_link('0', 1, 2)
n.add_link('1', 1, 2)
assert '0' in n.link_id_mapping
assert '1' in n.link_id_mapping
n.add_links({'2': {'from': 1, 'to': 2}, '3': {'from': 1, 'to': 2}, '4': {'from': 2, 'to': 3}})
assert n.link_id_mapping['2'] == {'from': 1, 'to': 2, 'multi_edge_idx': 2}
assert n.link_id_mapping['3'] == {'from': 1, 'to': 2, 'multi_edge_idx': 3}
assert n.link_id_mapping['4'] == {'from': 2, 'to': 3, 'multi_edge_idx': 0}
def test_adding_multiple_links_with_id_and_multi_idx_clashes():
n = Network('epsg:27700')
n.add_link('0', 1, 2)
n.add_link('1', 1, 2)
assert '0' in n.link_id_mapping
assert '1' in n.link_id_mapping
reindexing_dict, links_and_attribs = n.add_links(
{'0': {'from': 1, 'to': 2}, '1': {'from': 1, 'to': 2}, '2': {'from': 2, 'to': 3}})
assert '0' in reindexing_dict
assert '1' in reindexing_dict
assert len(n.link_id_mapping) == 5
assert_semantically_equal(n.link_id_mapping[reindexing_dict['0']], {'from': 1, 'to': 2, 'multi_edge_idx': 2})
assert_semantically_equal(n.link_id_mapping[reindexing_dict['1']], {'from': 1, 'to': 2, 'multi_edge_idx': 3})
def test_adding_multiple_links_missing_some_from_nodes():
n = Network('epsg:27700')
with pytest.raises(RuntimeError) as error_info:
n.add_links({'0': {'to': 2}, '1': {'from': 2, 'to': 3}})
assert "You are trying to add links which are missing `from` (origin) nodes" in str(error_info.value)
def test_adding_multiple_links_missing_from_nodes_completely():
n = Network('epsg:27700')
with pytest.raises(RuntimeError) as error_info:
n.add_links({'0': {'to': 2}, '1': {'to': 3}})
assert "You are trying to add links which are missing `from` (origin) nodes" in str(error_info.value)
def test_adding_multiple_links_missing_some_to_nodes():
n = Network('epsg:27700')
with pytest.raises(RuntimeError) as error_info:
n.add_links({'0': {'from': 2}, '1': {'from': 2, 'to': 3}})
assert "You are trying to add links which are missing `to` (destination) nodes" in str(error_info.value)
def test_adding_multiple_links_missing_to_nodes_completely():
n = Network('epsg:27700')
with pytest.raises(RuntimeError) as error_info:
n.add_links({'0': {'from': 2}, '1': {'from': 2}})
assert "You are trying to add links which are missing `to` (destination) nodes" in str(error_info.value)
def test_adding_links_with_different_non_overlapping_attributes():
# generates a nan attribute for link attributes
n = Network('epsg:27700')
reindexing_dict, links_and_attributes = n.add_links({
'2': {'from': 1, 'to': 2, 'speed': 20},
'3': {'from': 1, 'to': 2, 'capacity': 123},
'4': {'from': 2, 'to': 3, 'modes': [1, 2, 3]}})
assert reindexing_dict == {}
assert_semantically_equal(links_and_attributes, {
'2': {'id': '2', 'from': 1, 'to': 2, 'speed': 20},
'3': {'id': '3', 'from': 1, 'to': 2, 'capacity': 123},
'4': {'id': '4', 'from': 2, 'to': 3, 'modes': [1, 2, 3]}})
def test_adding_multiple_links_to_same_edge_clashing_with_existing_edge():
n = Network('epsg:27700')
n.add_link(link_id='0', u='2', v='2', attribs={'speed': 20})
n.add_links({'1': {'from': '2', 'to': '2', 'something': 20},
'2': {'from': '2', 'to': '2', 'capacity': 123}})
assert_semantically_equal(dict(n.links()), {'0': {'speed': 20, 'from': '2', 'to': '2', 'id': '0'},
'1': {'from': '2', 'to': '2', 'something': 20.0, 'id': '1'},
'2': {'from': '2', 'to': '2', 'capacity': 123.0, 'id': '2'}})
assert_semantically_equal(n.link_id_mapping, {'0': {'from': '2', 'to': '2', 'multi_edge_idx': 0},
'1': {'from': '2', 'to': '2', 'multi_edge_idx': 1},
'2': {'from': '2', 'to': '2', 'multi_edge_idx': 2}})
def test_network_modal_subgraph_using_general_subgraph_on_link_attribs():
n = Network('epsg:27700')
n.add_link('0', 1, 2, attribs={'modes': ['car', 'bike']})
n.add_link('1', 2, 3, attribs={'modes': ['car']})
n.add_link('2', 2, 3, attribs={'modes': ['bike']})
car_graph = n.subgraph_on_link_conditions(conditions={'modes': 'car'}, mixed_dtypes=True)
assert list(car_graph.edges) == [(1, 2, 0), (2, 3, 0)]
def test_modes():
n = Network('epsg:27700')
n.add_link('0', 1, 2, attribs={'modes': ['car', 'bike']})
n.add_link('1', 2, 3, attribs={'modes': ['car']})
n.add_link('2', 2, 3, attribs={'modes': ['bike']})
n.add_link('3', 2, 3, attribs={})
assert n.modes() == {'car', 'bike'}
def test_network_modal_subgraph_using_specific_modal_subgraph_method_single_mode():
n = Network('epsg:27700')
n.add_link('0', 1, 2, attribs={'modes': ['car', 'bike']})
n.add_link('1', 2, 3, attribs={'modes': ['car']})
n.add_link('2', 2, 3, attribs={'modes': ['bike']})
car_graph = n.modal_subgraph(modes='car')
assert list(car_graph.edges) == [(1, 2, 0), (2, 3, 0)]
def test_network_modal_subgraph_using_specific_modal_subgraph_method_several_modes():
n = Network('epsg:27700')
n.add_link('0', 1, 2, attribs={'modes': ['car', 'bike']})
n.add_link('1', 2, 3, attribs={'modes': ['car']})
n.add_link('2', 2, 3, attribs={'modes': ['bike']})
n.add_link('3', 2, 3, attribs={'modes': ['walk']})
car_bike_graph = n.modal_subgraph(modes=['car', 'bike'])
assert list(car_bike_graph.edges) == [(1, 2, 0), (2, 3, 0), (2, 3, 1)]
def test_links_on_modal_condition():
n = Network('epsg:27700')
n.add_link('0', 1, 2, attribs={'modes': ['car', 'bike']})
n.add_link('1', 2, 3, attribs={'modes': ['car']})
n.add_link('2', 2, 3, attribs={'modes': ['bike']})
n.add_link('3', 2, 3, attribs={'modes': ['walk']})
car_links = n.links_on_modal_condition(modes=['car'])
assert set(car_links) == {'0', '1'}
def test_nodes_on_modal_condition():
n = Network('epsg:27700')
n.add_link('0', 1, 2, attribs={'modes': ['car', 'bike']})
n.add_link('1', 2, 3, attribs={'modes': ['car']})
n.add_link('2', 2, 3, attribs={'modes': ['bike']})
n.add_link('3', 2, 3, attribs={'modes': ['walk']})
car_nodes = n.nodes_on_modal_condition(modes=['car'])
assert set(car_nodes) == {1, 2, 3}
test_geojson = os.path.abspath(
os.path.join(os.path.dirname(__file__), "test_data", "test_geojson.geojson"))
def test_nodes_on_spatial_condition_with_geojson(network_object_from_test_data):
network_object_from_test_data.add_node('1', {'id': '1', 'x': 508400, 'y': 162050})
nodes = network_object_from_test_data.nodes_on_spatial_condition(test_geojson)
assert set(nodes) == {'21667818', '25508485'}
def test_nodes_on_spatial_condition_with_shapely_geom(network_object_from_test_data):
region = Polygon([(-0.1487016677856445, 51.52556684350165), (-0.14063358306884766, 51.5255134425896),
(-0.13865947723388672, 51.5228700191647), (-0.14093399047851562, 51.52006622056997),
(-0.1492595672607422, 51.51974577545329), (-0.1508045196533203, 51.52276321095246),
(-0.1487016677856445, 51.52556684350165)])
network_object_from_test_data.add_node('1', {'id': '1', 'x': 508400, 'y': 162050})
nodes = network_object_from_test_data.nodes_on_spatial_condition(region)
assert set(nodes) == {'21667818', '25508485'}
def test_nodes_on_spatial_condition_with_s2_region(network_object_from_test_data):
region = '48761ad04d,48761ad054,48761ad05c,48761ad061,48761ad085,48761ad08c,48761ad094,48761ad09c,48761ad0b,48761ad0d,48761ad0f,48761ad14,48761ad182c,48761ad19c,48761ad1a4,48761ad1ac,48761ad1b4,48761ad1bac,48761ad3d7f,48761ad3dc,48761ad3e4,48761ad3ef,48761ad3f4,48761ad3fc,48761ad41,48761ad43,48761ad5d,48761ad5e4,48761ad5ec,48761ad5fc,48761ad7,48761ad803,48761ad81c,48761ad824,48761ad82c,48761ad9d,48761ad9e4,48761ad9e84,48761ad9fc,48761ada04,48761ada0c,48761b2804,48761b2814,48761b281c,48761b283,48761b2844,48761b284c,48761b2995,48761b29b4,48761b29bc,48761b29d,48761b29f,48761b2a04'
network_object_from_test_data.add_node(
'1', {'id': '1', 'x': 508400, 'y': 162050, 's2_id': spatial.generate_index_s2(51.3472033, 0.4449167)})
nodes = network_object_from_test_data.nodes_on_spatial_condition(region)
assert set(nodes) == {'21667818', '25508485'}
def test_links_on_spatial_condition_with_geojson(network_object_from_test_data):
network_object_from_test_data.add_node('1', {'id': '1', 'x': 508400, 'y': 162050})
network_object_from_test_data.add_link('2', u='21667818', v='1')
links = network_object_from_test_data.links_on_spatial_condition(test_geojson)
assert set(links) == {'1', '2'}
def test_links_on_spatial_condition_with_shapely_geom(network_object_from_test_data):
region = Polygon([(-0.1487016677856445, 51.52556684350165), (-0.14063358306884766, 51.5255134425896),
(-0.13865947723388672, 51.5228700191647), (-0.14093399047851562, 51.52006622056997),
(-0.1492595672607422, 51.51974577545329), (-0.1508045196533203, 51.52276321095246),
(-0.1487016677856445, 51.52556684350165)])
network_object_from_test_data.add_node('1', {'id': '1', 'x': 508400, 'y': 162050})
network_object_from_test_data.add_link('2', u='21667818', v='1')
links = network_object_from_test_data.links_on_spatial_condition(region)
assert set(links) == {'1', '2'}
def test_links_on_spatial_condition_with_s2_region(network_object_from_test_data):
region = '48761ad04d,48761ad054,48761ad05c,48761ad061,48761ad085,48761ad08c,48761ad094,48761ad09c,48761ad0b,48761ad0d,48761ad0f,48761ad14,48761ad182c,48761ad19c,48761ad1a4,48761ad1ac,48761ad1b4,48761ad1bac,48761ad3d7f,48761ad3dc,48761ad3e4,48761ad3ef,48761ad3f4,48761ad3fc,48761ad41,48761ad43,48761ad5d,48761ad5e4,48761ad5ec,48761ad5fc,48761ad7,48761ad803,48761ad81c,48761ad824,48761ad82c,48761ad9d,48761ad9e4,48761ad9e84,48761ad9fc,48761ada04,48761ada0c,48761b2804,48761b2814,48761b281c,48761b283,48761b2844,48761b284c,48761b2995,48761b29b4,48761b29bc,48761b29d,48761b29f,48761b2a04'
network_object_from_test_data.add_node('1', {'id': '1', 'x': 508400, 'y': 162050})
network_object_from_test_data.add_link('2', u='21667818', v='1')
links = network_object_from_test_data.links_on_spatial_condition(region)
assert set(links) == {'1', '2'}
def test_links_on_spatial_condition_with_intersection_and_complex_geometry_that_falls_outside_region(
network_object_from_test_data):
region = Polygon([(-0.1487016677856445, 51.52556684350165), (-0.14063358306884766, 51.5255134425896),
(-0.13865947723388672, 51.5228700191647), (-0.14093399047851562, 51.52006622056997),
(-0.1492595672607422, 51.51974577545329), (-0.1508045196533203, 51.52276321095246),
(-0.1487016677856445, 51.52556684350165)])
network_object_from_test_data.add_link(
'2', u='21667818', v='25508485',
attribs={'geometry': LineString(
[(528504.1342843144, 182155.7435136598), (508400, 162050), (528489.467895946, 182206.20303669578)])})
links = network_object_from_test_data.links_on_spatial_condition(region, how='intersect')
assert set(links) == {'1', '2'}
def test_links_on_spatial_condition_with_containement(network_object_from_test_data):
region = Polygon([(-0.1487016677856445, 51.52556684350165), (-0.14063358306884766, 51.5255134425896),
(-0.13865947723388672, 51.5228700191647), (-0.14093399047851562, 51.52006622056997),
(-0.1492595672607422, 51.51974577545329), (-0.1508045196533203, 51.52276321095246),
(-0.1487016677856445, 51.52556684350165)])
network_object_from_test_data.add_node('1', {'id': '1', 'x': 508400, 'y': 162050})
network_object_from_test_data.add_link('2', u='21667818', v='1')
links = network_object_from_test_data.links_on_spatial_condition(region, how='within')
assert set(links) == {'1'}
def test_links_on_spatial_condition_with_containement_and_complex_geometry_that_falls_outside_region(
network_object_from_test_data):
region = Polygon([(-0.1487016677856445, 51.52556684350165), (-0.14063358306884766, 51.5255134425896),
(-0.13865947723388672, 51.5228700191647), (-0.14093399047851562, 51.52006622056997),
(-0.1492595672607422, 51.51974577545329), (-0.1508045196533203, 51.52276321095246),
(-0.1487016677856445, 51.52556684350165)])
network_object_from_test_data.add_link(
'2', u='21667818', v='25508485',
attribs={'geometry': LineString(
[(528504.1342843144, 182155.7435136598), (508400, 162050), (528489.467895946, 182206.20303669578)])})
links = network_object_from_test_data.links_on_spatial_condition(region, how='within')
assert set(links) == {'1'}
def test_links_on_spatial_condition_with_containement_and_s2_region(network_object_from_test_data):
region = '48761ad04d,48761ad054,48761ad05c,48761ad061,48761ad085,48761ad08c,48761ad094,48761ad09c,48761ad0b,48761ad0d,48761ad0f,48761ad14,48761ad182c,48761ad19c,48761ad1a4,48761ad1ac,48761ad1b4,48761ad1bac,48761ad3d7f,48761ad3dc,48761ad3e4,48761ad3ef,48761ad3f4,48761ad3fc,48761ad41,48761ad43,48761ad5d,48761ad5e4,48761ad5ec,48761ad5fc,48761ad7,48761ad803,48761ad81c,48761ad824,48761ad82c,48761ad9d,48761ad9e4,48761ad9e84,48761ad9fc,48761ada04,48761ada0c,48761b2804,48761b2814,48761b281c,48761b283,48761b2844,48761b284c,48761b2995,48761b29b4,48761b29bc,48761b29d,48761b29f,48761b2a04'
network_object_from_test_data.add_node('1', {'id': '1', 'x': 508400, 'y': 162050})
network_object_from_test_data.add_link('2', u='21667818', v='1')
links = network_object_from_test_data.links_on_spatial_condition(region, how='within')
assert set(links) == {'1'}
def test_links_on_spatial_condition_with_containement_and_complex_geometry_that_falls_outside_s2_region(
network_object_from_test_data):
region = '48761ad04d,48761ad054,48761ad05c,48761ad061,48761ad085,48761ad08c,48761ad094,48761ad09c,48761ad0b,48761ad0d,48761ad0f,48761ad14,48761ad182c,48761ad19c,48761ad1a4,48761ad1ac,48761ad1b4,48761ad1bac,48761ad3d7f,48761ad3dc,48761ad3e4,48761ad3ef,48761ad3f4,48761ad3fc,48761ad41,48761ad43,48761ad5d,48761ad5e4,48761ad5ec,48761ad5fc,48761ad7,48761ad803,48761ad81c,48761ad824,48761ad82c,48761ad9d,48761ad9e4,48761ad9e84,48761ad9fc,48761ada04,48761ada0c,48761b2804,48761b2814,48761b281c,48761b283,48761b2844,48761b284c,48761b2995,48761b29b4,48761b29bc,48761b29d,48761b29f,48761b2a04'
network_object_from_test_data.add_link(
'2', u='21667818', v='25508485',
attribs={'geometry': LineString(
[(528504.1342843144, 182155.7435136598), (508400, 162050), (528489.467895946, 182206.20303669578)])})
links = network_object_from_test_data.links_on_spatial_condition(region, how='within')
assert set(links) == {'1'}
def test_find_shortest_path_when_graph_has_no_extra_edge_choices():
n = Network('epsg:27700')
n.add_link('0', 1, 2, attribs={'modes': ['car', 'bike'], 'length': 1})
n.add_link('1', 2, 3, attribs={'modes': ['car'], 'length': 1})
n.add_link('2', 2, 3, attribs={'modes': ['bike'], 'length': 1})
n.add_link('3', 2, 3, attribs={'modes': ['walk'], 'length': 1})
bike_route = n.find_shortest_path(1, 3, modes='bike')
assert bike_route == ['0', '2']
def test_find_shortest_path_when_subgraph_is_pre_computed():
n = Network('epsg:27700')
n.add_link('0', 1, 2, attribs={'modes': ['car', 'bike'], 'length': 1})
n.add_link('1', 2, 3, attribs={'modes': ['car'], 'length': 1})
n.add_link('2', 2, 3, attribs={'modes': ['bike'], 'length': 1})
n.add_link('3', 2, 3, attribs={'modes': ['walk'], 'length': 1})
bike_g = n.modal_subgraph(modes='bike')
bike_route = n.find_shortest_path(1, 3, subgraph=bike_g)
assert bike_route == ['0', '2']
def test_find_shortest_path_defaults_to_full_graph():
n = Network('epsg:27700')
n.add_link('0', 1, 2, attribs={'modes': ['car', 'bike'], 'length': 1})
n.add_link('1', 2, 3, attribs={'modes': ['car'], 'freespeed': 3})
n.add_link('2', 2, 3, attribs={'modes': ['bike'], 'freespeed': 2})
n.add_link('3', 2, 3, attribs={'modes': ['walk'], 'freespeed': 1})
bike_route = n.find_shortest_path(1, 3)
assert bike_route == ['0', '1']
def test_find_shortest_path_when_graph_has_extra_edge_choice_for_freespeed_that_is_obvious():
n = Network('epsg:27700')
n.add_link('0', 1, 2, attribs={'modes': ['car', 'bike'], 'length': 1, 'freespeed': 10})
n.add_link('2', 2, 3, attribs={'modes': ['car', 'bike'], 'length': 1, 'freespeed': 10})
n.add_link('3', 2, 3, attribs={'modes': ['car', 'bike'], 'length': 1, 'freespeed': 1})
bike_route = n.find_shortest_path(1, 3, modes='bike')
assert bike_route == ['0', '2']
def test_find_shortest_path_when_graph_has_extra_edge_choice_with_attractive_mode():
n = Network('epsg:27700')
n.add_link('0', 1, 2, attribs={'modes': ['car', 'bike'], 'length': 1, 'freespeed': 10})
n.add_link('2', 2, 3, attribs={'modes': ['car', 'bike'], 'length': 1, 'freespeed': 10})
n.add_link('3', 2, 3, attribs={'modes': ['bike'], 'length': 1, 'freespeed': 1})
bike_route = n.find_shortest_path(1, 3, modes='bike')
assert bike_route == ['0', '3']
def test_find_shortest_path_and_return_just_nodes():
n = Network('epsg:27700')
n.add_link('0', 1, 2, attribs={'modes': ['car', 'bike'], 'length': 1, 'freespeed': 10})
n.add_link('1', 2, 3, attribs={'modes': ['car', 'bike'], 'length': 1, 'freespeed': 10})
bike_route = n.find_shortest_path(1, 3, return_nodes=True)
assert bike_route == [1, 2, 3]
def test_add_link_adds_link_with_specific_multi_idx():
n = Network('epsg:27700')
n.add_link('0', 1, 2, 0)
assert '0' in n.link_id_mapping
assert n.link_id_mapping['0'] == {'from': 1, 'to': 2, 'multi_edge_idx': 0}
assert n.graph[1][2][0] == {'from': 1, 'to': 2, 'id': '0'}
def test_add_link_generates_new_multi_idx_if_already_exists():
n = Network('epsg:27700')
n.add_link('0', 1, 2, 0)
n.add_link('1', 1, 2, 0)
assert '0' in n.link_id_mapping
assert '1' in n.link_id_mapping
assert n.link_id_mapping['0'] == {'from': 1, 'to': 2, 'multi_edge_idx': 0}
assert n.graph[1][2][0] == {'from': 1, 'to': 2, 'id': '0'}
assert n.link_id_mapping['1']['multi_edge_idx'] != 0
assert n.graph[1][2][n.link_id_mapping['1']['multi_edge_idx']] == {'from': 1, 'to': 2, 'id': '1'}
def test_reindex_node(network1):
assert [id for id, attribs in network1.nodes()] == ['101982', '101986']
assert [id for id, attribs in network1.links()] == ['0']
assert network1.link('0')['from'] == '101982'
assert network1.link('0')['to'] == '101986'
assert [(from_n, to_n) for from_n, to_n, attribs in network1.edges()] == [('101982', '101986')]
assert network1.link_id_mapping['0']['from'] == '101982'
network1.reindex_node('101982', '007')
assert [id for id, attribs in network1.nodes()] == ['007', '101986']
assert [id for id, attribs in network1.links()] == ['0']
assert network1.link('0')['from'] == '007'
assert network1.link('0')['to'] == '101986'
assert [(from_n, to_n) for from_n, to_n, attribs in network1.edges()] == [('007', '101986')]
assert network1.link_id_mapping['0']['from'] == '007'
correct_change_log_df = pd.DataFrame(
{'timestamp': {3: '2020-06-08 19:39:08', 4: '2020-06-08 19:39:08', 5: '2020-06-08 19:39:08'},
'change_event': {3: 'modify', 4: 'modify', 5: 'modify'}, 'object_type': {3: 'link', 4: 'node', 5: 'node'},
'old_id': {3: '0', 4: '101982', 5: '101982'}, 'new_id': {3: '0', 4: '007', 5: '101982'}, 'old_attributes': {
3: "{'id': '0', 'from': '101982', 'to': '101986', 'freespeed': 4.166666666666667, 'capacity': 600.0, 'permlanes': 1.0, 'oneway': '1', 'modes': ['car'], 's2_from': 5221390329378179879, 's2_to': 5221390328605860387, 'length': 52.765151087870265, 'attributes': {'osm:way:access': {'name': 'osm:way:access', 'class': 'java.lang.String', 'text': 'permissive'}, 'osm:way:highway': {'name': 'osm:way:highway', 'class': 'java.lang.String', 'text': 'unclassified'}, 'osm:way:id': {'name': 'osm:way:id', 'class': 'java.lang.Long', 'text': '26997928'}, 'osm:way:name': {'name': 'osm:way:name', 'class': 'java.lang.String', 'text': 'Brunswick Place'}}}",
4: "{'id': '101982', 'x': '528704.1425925883', 'y': '182068.78193707118', 'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879}",
5: "{'id': '101982', 'x': '528704.1425925883', 'y': '182068.78193707118', 'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879}"},
'new_attributes': {
3: "{'id': '0', 'from': '007', 'to': '101986', 'freespeed': 4.166666666666667, 'capacity': 600.0, 'permlanes': 1.0, 'oneway': '1', 'modes': ['car'], 's2_from': 5221390329378179879, 's2_to': 5221390328605860387, 'length': 52.765151087870265, 'attributes': {'osm:way:access': {'name': 'osm:way:access', 'class': 'java.lang.String', 'text': 'permissive'}, 'osm:way:highway': {'name': 'osm:way:highway', 'class': 'java.lang.String', 'text': 'unclassified'}, 'osm:way:id': {'name': 'osm:way:id', 'class': 'java.lang.Long', 'text': '26997928'}, 'osm:way:name': {'name': 'osm:way:name', 'class': 'java.lang.String', 'text': 'Brunswick Place'}}}",
4: "{'id': '007', 'x': '528704.1425925883', 'y': '182068.78193707118', 'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879}",
5: "{'id': '007', 'x': '528704.1425925883', 'y': '182068.78193707118', 'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879}"},
'diff': {3: [('change', 'from', ('101982', '007'))],
4: [('change', 'id', ('101982', '007')), ('change', 'id', ('101982', '007'))],
5: [('change', 'id', ('101982', '007'))]}})
cols_to_compare = ['change_event', 'object_type', 'old_id', 'new_id', 'old_attributes', 'new_attributes', 'diff']
assert_frame_equal(network1.change_log[cols_to_compare].tail(3), correct_change_log_df[cols_to_compare],
check_names=False,
check_dtype=False)
def test_reindex_node_when_node_id_already_exists(network1):
assert [id for id, attribs in network1.nodes()] == ['101982', '101986']
assert [id for id, attribs in network1.links()] == ['0']
assert network1.link('0')['from'] == '101982'
assert network1.link('0')['to'] == '101986'
assert [(from_n, to_n) for from_n, to_n, attribs in network1.edges()] == [('101982', '101986')]
assert network1.link_id_mapping['0']['from'] == '101982'
network1.reindex_node('101982', '101986')
node_ids = [id for id, attribs in network1.nodes()]
assert '101986' in node_ids
assert '101982' not in node_ids
assert len(set(node_ids)) == 2
assert network1.node(node_ids[0]) != network1.node(node_ids[1])
def test_reindex_link(network1):
assert [id for id, attribs in network1.nodes()] == ['101982', '101986']
assert [id for id, attribs in network1.links()] == ['0']
assert '0' in network1.link_id_mapping
assert network1.link('0')['from'] == '101982'
assert network1.link('0')['to'] == '101986'
assert [(from_n, to_n) for from_n, to_n, attribs in network1.edges()] == [('101982', '101986')]
assert network1.edge('101982', '101986')[0]['id'] == '0'
network1.reindex_link('0', '007')
assert [id for id, attribs in network1.nodes()] == ['101982', '101986']
assert [id for id, attribs in network1.links()] == ['007']
assert '0' not in network1.link_id_mapping
assert '007' in network1.link_id_mapping
assert network1.link('007')['from'] == '101982'
assert network1.link('007')['to'] == '101986'
assert [(from_n, to_n) for from_n, to_n, attribs in network1.edges()] == [('101982', '101986')]
assert network1.edge('101982', '101986')[0]['id'] == '007'
correct_change_log_df = pd.DataFrame(
{'timestamp': {3: '2020-06-08 19:34:48', 4: '2020-06-08 19:34:48'}, 'change_event': {3: 'modify', 4: 'modify'},
'object_type': {3: 'link', 4: 'link'}, 'old_id': {3: '0', 4: '0'}, 'new_id': {3: '007', 4: '0'},
'old_attributes': {
3: "{'id': '0', 'from': '101982', 'to': '101986', 'freespeed': 4.166666666666667, 'capacity': 600.0, 'permlanes': 1.0, 'oneway': '1', 'modes': ['car'], 's2_from': 5221390329378179879, 's2_to': 5221390328605860387, 'length': 52.765151087870265, 'attributes': {'osm:way:access': {'name': 'osm:way:access', 'class': 'java.lang.String', 'text': 'permissive'}, 'osm:way:highway': {'name': 'osm:way:highway', 'class': 'java.lang.String', 'text': 'unclassified'}, 'osm:way:id': {'name': 'osm:way:id', 'class': 'java.lang.Long', 'text': '26997928'}, 'osm:way:name': {'name': 'osm:way:name', 'class': 'java.lang.String', 'text': 'Brunswick Place'}}}",
4: "{'id': '0', 'from': '101982', 'to': '101986', 'freespeed': 4.166666666666667, 'capacity': 600.0, 'permlanes': 1.0, 'oneway': '1', 'modes': ['car'], 's2_from': 5221390329378179879, 's2_to': 5221390328605860387, 'length': 52.765151087870265, 'attributes': {'osm:way:access': {'name': 'osm:way:access', 'class': 'java.lang.String', 'text': 'permissive'}, 'osm:way:highway': {'name': 'osm:way:highway', 'class': 'java.lang.String', 'text': 'unclassified'}, 'osm:way:id': {'name': 'osm:way:id', 'class': 'java.lang.Long', 'text': '26997928'}, 'osm:way:name': {'name': 'osm:way:name', 'class': 'java.lang.String', 'text': 'Brunswick Place'}}}"},
'new_attributes': {
3: "{'id': '007', 'from': '101982', 'to': '101986', 'freespeed': 4.166666666666667, 'capacity': 600.0, 'permlanes': 1.0, 'oneway': '1', 'modes': ['car'], 's2_from': 5221390329378179879, 's2_to': 5221390328605860387, 'length': 52.765151087870265, 'attributes': {'osm:way:access': {'name': 'osm:way:access', 'class': 'java.lang.String', 'text': 'permissive'}, 'osm:way:highway': {'name': 'osm:way:highway', 'class': 'java.lang.String', 'text': 'unclassified'}, 'osm:way:id': {'name': 'osm:way:id', 'class': 'java.lang.Long', 'text': '26997928'}, 'osm:way:name': {'name': 'osm:way:name', 'class': 'java.lang.String', 'text': 'Brunswick Place'}}}",
4: "{'id': '007', 'from': '101982', 'to': '101986', 'freespeed': 4.166666666666667, 'capacity': 600.0, 'permlanes': 1.0, 'oneway': '1', 'modes': ['car'], 's2_from': 5221390329378179879, 's2_to': 5221390328605860387, 'length': 52.765151087870265, 'attributes': {'osm:way:access': {'name': 'osm:way:access', 'class': 'java.lang.String', 'text': 'permissive'}, 'osm:way:highway': {'name': 'osm:way:highway', 'class': 'java.lang.String', 'text': 'unclassified'}, 'osm:way:id': {'name': 'osm:way:id', 'class': 'java.lang.Long', 'text': '26997928'}, 'osm:way:name': {'name': 'osm:way:name', 'class': 'java.lang.String', 'text': 'Brunswick Place'}}}"},
'diff': {3: [('change', 'id', ('0', '007')), ('change', 'id', ('0', '007'))],
4: [('change', 'id', ('0', '007'))]}})
cols_to_compare = ['change_event', 'object_type', 'old_id', 'new_id', 'old_attributes', 'new_attributes', 'diff']
assert_frame_equal(network1.change_log[cols_to_compare].tail(2), correct_change_log_df[cols_to_compare],
check_names=False, check_dtype=False)
def test_reindex_link_when_link_id_already_exists(network1):
assert [id for id, attribs in network1.nodes()] == ['101982', '101986']
assert [id for id, attribs in network1.links()] == ['0']
assert network1.link('0')['from'] == '101982'
assert network1.link('0')['to'] == '101986'
assert [(from_n, to_n) for from_n, to_n, attribs in network1.edges()] == [('101982', '101986')]
network1.add_link('1', '101986', '101982', attribs={})
network1.reindex_link('0', '1')
link_ids = [id for id, attribs in network1.links()]
assert '1' in link_ids
assert '0' not in link_ids
assert len(set(link_ids)) == 2
assert network1.link(link_ids[0]) != network1.link(link_ids[1])
def test_modify_node_adds_attributes_in_the_graph_and_change_is_recorded_by_change_log():
n = Network('epsg:27700')
n.add_node(1, {'a': 1})
n.apply_attributes_to_node(1, {'b': 1})
assert n.node(1) == {'b': 1, 'a': 1}
correct_change_log_df = pd.DataFrame(
{'timestamp': {0: '2020-05-28 13:49:53', 1: '2020-05-28 13:49:53'}, 'change_event': {0: 'add', 1: 'modify'},
'object_type': {0: 'node', 1: 'node'}, 'old_id': {0: None, 1: 1}, 'new_id': {0: 1, 1: 1},
'old_attributes': {0: None, 1: "{'a': 1}"}, 'new_attributes': {0: "{'a': 1}", 1: "{'a': 1, 'b': 1}"},
'diff': {0: [('add', '', [('a', 1)]), ('add', 'id', 1)], 1: [('add', '', [('b', 1)])]}})
cols_to_compare = ['change_event', 'object_type', 'old_id', 'new_id', 'old_attributes', 'new_attributes', 'diff']
assert_frame_equal(n.change_log[cols_to_compare], correct_change_log_df[cols_to_compare], check_names=False,
check_dtype=False)
def test_modify_node_overwrites_existing_attributes_in_the_graph_and_change_is_recorded_by_change_log():
n = Network('epsg:27700')
n.add_node(1, {'a': 1})
n.apply_attributes_to_node(1, {'a': 4})
assert n.node(1) == {'a': 4}
correct_change_log_df = pd.DataFrame(
{'timestamp': {0: '2020-05-28 13:49:53', 1: '2020-05-28 13:49:53'}, 'change_event': {0: 'add', 1: 'modify'},
'object_type': {0: 'node', 1: 'node'}, 'old_id': {0: None, 1: 1}, 'new_id': {0: 1, 1: 1},
'old_attributes': {0: None, 1: "{'a': 1}"}, 'new_attributes': {0: "{'a': 1}", 1: "{'a': 4}"},
'diff': {0: [('add', '', [('a', 1)]), ('add', 'id', 1)], 1: [('change', 'a', (1, 4))]}})
cols_to_compare = ['change_event', 'object_type', 'old_id', 'new_id', 'old_attributes', 'new_attributes', 'diff']
assert_frame_equal(n.change_log[cols_to_compare], correct_change_log_df[cols_to_compare], check_dtype=False)
def test_modify_nodes_adds_and_changes_attributes_in_the_graph_and_change_is_recorded_by_change_log():
n = Network('epsg:27700')
n.add_node(1, {'a': 1})
n.add_node(2, {'b': 1})
n.apply_attributes_to_nodes({1: {'a': 4}, 2: {'a': 1}})
assert n.node(1) == {'a': 4}
assert n.node(2) == {'b': 1, 'a': 1}
    correct_change_log_df = pd.DataFrame(
{'timestamp': {0: '2020-06-01 15:07:51', 1: '2020-06-01 15:07:51', 2: '2020-06-01 15:07:51',
3: '2020-06-01 15:07:51'}, 'change_event': {0: 'add', 1: 'add', 2: 'modify', 3: 'modify'},
'object_type': {0: 'node', 1: 'node', 2: 'node', 3: 'node'}, 'old_id': {0: None, 1: None, 2: 1, 3: 2},
'new_id': {0: 1, 1: 2, 2: 1, 3: 2}, 'old_attributes': {0: None, 1: None, 2: "{'a': 1}", 3: "{'b': 1}"},
'new_attributes': {0: "{'a': 1}", 1: "{'b': 1}", 2: "{'a': 4}", 3: "{'b': 1, 'a': 1}"},
'diff': {0: [('add', '', [('a', 1)]), ('add', 'id', 1)], 1: [('add', '', [('b', 1)]), ('add', 'id', 2)],
2: [('change', 'a', (1, 4))], 3: [('add', '', [('a', 1)])]}
        })
import argparse
import math
import pandas as pd
import numpy as np
import matplotlib
matplotlib.rcParams['text.usetex'] = True
import matplotlib.pyplot as plt
from scipy.stats.mstats import gmean
from npbench.infrastructure import utilities as util
# geomean which ignores NA values
def my_geomean(x):
x = x.dropna()
res = gmean(x)
return res
# make nice/short numbers with up/down indicator
def my_speedup_abbr(x):
prefix = ""
label = ""
if math.isnan(x):
return ""
if x < 1:
prefix = u"\u2191"
x = 1 / x
elif x > 1:
prefix = u"\u2193"
if x > 100:
x = int(x)
if x > 1000:
label = prefix + str(round(x / 1000, 1)) + "k"
else:
label = prefix + str(round(x, 1))
return str(label)
# make nice/short runtime numbers with seconds / milliseconds
def my_runtime_abbr(x):
suffix = " s"
if math.isnan(x):
return ""
if x < 0.1:
x = x * 1000
suffix = " ms"
return str(round(x, 2)) + suffix
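# Hedged usage sketch (not part of the original script and never called): shows what the two
# formatting helpers above produce for a few assumed example values.
def _formatting_helpers_example():
    assert my_speedup_abbr(0.5) == u"\u2191" + "2.0"  # ratio < 1: faster than the baseline, up arrow
    assert my_speedup_abbr(2.0) == u"\u2193" + "2.0"  # ratio > 1: slower than the baseline, down arrow
    assert my_runtime_abbr(0.05) == "50.0 ms"         # runtimes under 0.1 s are shown in milliseconds
    assert my_runtime_abbr(2.0) == "2.0 s"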
def bootstrap_ci(data, statfunction=np.median, alpha=0.05, n_samples=300):
"""inspired by https://github.com/cgevans/scikits-bootstrap"""
# import warnings
def bootstrap_ids(data, n_samples=100):
for _ in range(n_samples):
yield np.random.randint(data.shape[0], size=(data.shape[0], ))
alphas = np.array([alpha / 2, 1 - alpha / 2])
nvals = np.round((n_samples - 1) * alphas).astype(int)
# if np.any(nvals < 10) or np.any(nvals >= n_samples - 10):
# warnings.warn(
# "Some values used extremal samples; results are probably unstable. "
# "Try to increase n_samples")
data = np.array(data)
if np.prod(data.shape) != max(data.shape):
raise ValueError("Data must be 1D")
data = data.ravel()
boot_indexes = bootstrap_ids(data, n_samples)
stat = np.asarray([statfunction(data[_ids]) for _ids in boot_indexes])
stat.sort(axis=0)
return stat[nvals][1] - stat[nvals][0]
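# Hedged usage sketch (synthetic data, not part of the original script): bootstrap_ci returns the
# width of the bootstrapped confidence interval of `statfunction` over a 1D sample; the groupby/agg
# further down turns that width into a percentage of the median (the 'perc' column).
def _bootstrap_ci_example():
    runtimes = np.random.normal(loc=1.0, scale=0.1, size=50)  # pretend per-repetition runtimes in seconds
    ci_width = bootstrap_ci(runtimes, statfunction=np.median, alpha=0.05, n_samples=300)
    return 100 * ci_width / np.median(runtimes)  # relative CI size, analogous to 'perc'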
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-p",
"--preset",
choices=['S', 'M', 'L', 'paper'],
nargs="?",
default='S')
args = vars(parser.parse_args())
# create a database connection
database = r"npbench.db"
conn = util.create_connection(database)
data = pd.read_sql_query("SELECT * FROM results", conn)
    # get rid of timestamp, kind, dwarf and version, we don't use them
data = data.drop(['timestamp', 'kind', 'dwarf', 'version'],
axis=1).reset_index(drop=True)
# Remove everything that does not have a domain
data = data[data["domain"] != ""]
# remove everything that does not validate, then get rid of validated column
data = data[data['validated'] == True]
data = data.drop(['validated'], axis=1).reset_index(drop=True)
# Filter by preset
data = data[data['preset'] == args['preset']]
data = data.drop(['preset'], axis=1).reset_index(drop=True)
    # for each framework and benchmark, keep only the best (details, mode) combination (based on median runtime), then drop those columns
aggdata = data.groupby(["benchmark", "domain", "framework", "mode", "details"],
dropna=False).agg({
"time": np.median
}).reset_index()
best = aggdata.sort_values("time").groupby(
["benchmark", "domain", "framework", "mode"],
dropna=False).first().reset_index()
bestgroup = best.drop(
["time"],
axis=1) # remove time, we don't need it and it is actually a median
data = pd.merge(left=bestgroup,
right=data,
on=["benchmark", "domain", "framework", "mode", "details"],
how="inner") # do a join on data and best
data = data.drop(['mode', 'details'], axis=1).reset_index(drop=True)
frmwrks = list(data['framework'].unique())
print(frmwrks)
assert ('numpy' in frmwrks)
frmwrks.remove('numpy')
frmwrks.append('numpy')
lfilter = ['benchmark', 'domain'] + frmwrks
# get improvement over numpy (keep times in best_wide_time for numpy column), reorder columns
best_wide = best.pivot_table(index=["benchmark", "domain"],
columns="framework",
values="time").reset_index() # pivot to wide form
best_wide = best_wide[lfilter].reset_index(drop=True)
best_wide_time = best_wide.copy(deep=True)
for f in frmwrks:
best_wide[f] = best_wide[f] / best_wide_time['numpy']
# compute ci-size for each
cidata = data.groupby(["benchmark", "domain", "framework"], dropna=False).agg({
"time": [bootstrap_ci, np.median]
}).reset_index()
cidata.columns = ['_'.join(col).strip() for col in cidata.columns.values]
cidata['perc'] = (cidata['time_bootstrap_ci'] / cidata['time_median']) * 100
overall = best_wide.drop(['domain'], axis=1)
overall = pd.melt(overall, [
'benchmark',
])
overall = overall.groupby(['framework']).value.apply(my_geomean).reset_index(
) #this throws warnings if NA is found, which is ok
overall_wide = overall.pivot_table(columns="framework",
values="value",
dropna=False).reset_index(drop=True)
overall_wide = overall_wide[frmwrks]
overall_time = best_wide_time.drop(['domain'], axis=1)
    overall_time = pd.melt(overall_time, ['benchmark'])
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 26 17:56:09 2021
@author: f.romano
"""
from datetime import datetime
from binance.client import Client
import pandas as pd
import statistics
import sklearn
import numpy as np
def calculate_values(samples, variables, values):
    # samples_mean = statistics.mean(samples)
    samples_mean = 0
    # each variable describes a block of raw samples that is averaged into `n_values` features,
    # written into `values` starting at `offset_values`
    for v in variables:
        for i in range(v["n_values"]):
            values[v["offset_values"] + i] = statistics.mean(
                samples[v["offset_samples"] + v["n_samples"] * i:
                        v["offset_samples"] + v["n_samples"] * (i + 1)]) - samples_mean
    # print(values)
def get_target(samples, target_definition):
    # slice out the target window defined by the target definition
    target_samples = samples[target_definition["samples_from"]:target_definition["samples_to"]]
    target_samples.sort(reverse=True)
    print(target_samples[:target_definition["n_samples"]])
    # percentage gain of the mean of the 5 largest samples in the window relative to the current sample
    return 100 * ((statistics.mean(target_samples[:5]) - samples[0]) / samples[0])
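# Hedged illustration (all numbers and dict shapes below are assumptions, not from the original
# script): shows the layout that calculate_values and get_target above expect.
def _feature_layout_example():
    samples = [100.0 + i for i in range(100)]  # stand-in price samples
    variables = [
        {"offset_samples": 0, "n_samples": 5, "offset_values": 0, "n_values": 4},    # 4 means over 5 samples each
        {"offset_samples": 20, "n_samples": 10, "offset_values": 4, "n_values": 2},  # 2 means over 10 samples each
    ]
    values = [0.0] * 6
    calculate_values(samples, variables, values)  # fills `values` in place
    target_definition = {"samples_from": 30, "samples_to": 60, "n_samples": 5}
    return values, get_target(samples, target_definition)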
def import_samples():
    df = pd.read_csv("Binance_BTCUSDT_minute.csv", usecols=[1, 3], parse_dates=[1], skiprows=1)
    # print(df.head())
    df["open"] = pd.to_numeric(df["open"], downcast="float")
    df["date"] = pd.to_datetime(df["date"])
import numpy as np
import pandas as pd
import re
import preprocessor as p
from scipy.io import arff
def read_and_process(path):
arff = open(path, 'r')
attributes = []
values = []
is_attr = True
arff.readline()
arff.readline()
while is_attr:
line = arff.readline()
if len(line.split()) == 0:
is_attr = False
continue
type = line.split()[0]
attr = ' '.join(line.split()[1:])
if type == "@attribute":
attributes.append(attr)
else:
is_attr = False
for line in arff.readlines():
if len(line.split(",")) < 10:
continue
else:
components = line.split(",")
values.append(components)
name = components[0].replace("\'", "").split("\\\\")[-1]
values[-1][0] = name
df = pd.DataFrame(columns=attributes, data=values)
df['idx'] = [int(re.sub('id_', '', i)) for i in df[df.columns[0]]]
df = df.drop(df.columns[0], axis=1)
df = df.set_index(['idx'])
df = df.apply(pd.to_numeric, errors='coerce')
df = df.sort_index()
return df
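# Hedged usage sketch (the path is an assumption): read_and_process parses the '@attribute' header
# of an ARFF file by hand and returns a numeric DataFrame indexed by the integer part of each row
# id (e.g. 'id_12' -> 12), sorted by that index.
def _read_and_process_example():
    return read_and_process("data/kaggle_mairesse_labeled.arff")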
def sentence_preprocess(sentence):
sentence = p.clean(sentence)
# Remove hyperlinks
sentence = re.sub(r'http\S+', ' ', sentence)
# Remove punctuations and numbers
# sentence = re.sub('[^a-zA-Z]', ' ', sentence)
sentence = re.sub('[^a-zA-Z.?!,]', ' ', sentence)
# Single character removal (except I)
sentence = re.sub(r"\s+[a-zA-HJ-Z]\s+", ' ', sentence)
# Removing multiple spaces
sentence = re.sub(r'\s+', ' ', sentence)
return sentence
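# Hedged usage sketch (the example text is made up): p.clean from the tweet-preprocessor package
# strips tweet artefacts such as mentions, hashtags and URLs, then the regexes above keep letters
# and basic punctuation, drop stray single characters (except 'I') and collapse whitespace.
def _sentence_preprocess_example():
    raw = "@user I loved it!! Check http://t.co/xyz #recommended"
    return sentence_preprocess(raw)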
def load_features(dir, dataset):
idx = 'id'
if dataset == 'kaggle':
drop_cols = ['BROWN-FREQ numeric', 'K-F-FREQ numeric', 'K-F-NCATS numeric', 'K-F-NSAMP numeric',
'T-L-FREQ numeric', 'Extraversion numeric'
, '\'Emotional stability\' numeric', 'Agreeableness numeric', 'Conscientiousness numeric',
'\'Openness to experience\' numeric']
mairesse = read_and_process(dir + dataset + '_mairesse_labeled.arff')
mairesse = mairesse.drop(drop_cols, axis=1)
elif dataset == 'essays':
idx = '#AUTHID'
mairesse = pd.read_csv(dir + dataset + '_mairesse_labeled.csv')
mairesse = mairesse.set_index(mairesse.columns[0])
nrc = pd.read_csv(dir + dataset + '_nrc.csv').set_index([idx])
# nrc = nrc.sort_values(by=['id'])
# nrc = nrc.drop(['id'], axis=1)
    nrc_vad = pd.read_csv(dir + dataset + '_nrc_vad.csv')
from datetime import datetime, timedelta
from io import StringIO
import re
import sys
import numpy as np
import pytest
from pandas._libs.tslib import iNaT
from pandas.compat import PYPY
from pandas.compat.numpy import np_array_datetime64_compat
from pandas.core.dtypes.common import (
is_datetime64_dtype,
is_datetime64tz_dtype,
is_object_dtype,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.dtypes import DatetimeTZDtype
import pandas as pd
from pandas import (
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
Interval,
IntervalIndex,
PeriodIndex,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
)
from pandas.core.accessor import PandasDelegate
from pandas.core.arrays import DatetimeArray, PandasArray, TimedeltaArray
from pandas.core.base import NoNewAttributesMixin, PandasObject
from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin
import pandas.util.testing as tm
class CheckStringMixin:
def test_string_methods_dont_fail(self):
repr(self.container)
str(self.container)
bytes(self.container)
def test_tricky_container(self):
if not hasattr(self, "unicode_container"):
pytest.skip("Need unicode_container to test with this")
repr(self.unicode_container)
str(self.unicode_container)
class CheckImmutable:
mutable_regex = re.compile("does not support mutable operations")
    def check_mutable_error(self, *args, **kwargs):
        # Call the mutating function (first positional argument) and check that it raises a
        # TypeError whose message matches `mutable_regex`, i.e. the container rejects mutation.
        with pytest.raises(TypeError, match=self.mutable_regex.pattern):
            args[0](*args[1:], **kwargs)
def test_no_mutable_funcs(self):
def setitem():
self.container[0] = 5
self.check_mutable_error(setitem)
def setslice():
self.container[1:2] = 3
self.check_mutable_error(setslice)
def delitem():
del self.container[0]
self.check_mutable_error(delitem)
def delslice():
del self.container[0:3]
self.check_mutable_error(delslice)
mutable_methods = getattr(self, "mutable_methods", [])
for meth in mutable_methods:
self.check_mutable_error(getattr(self.container, meth))
def test_slicing_maintains_type(self):
result = self.container[1:2]
expected = self.lst[1:2]
self.check_result(result, expected)
def check_result(self, result, expected, klass=None):
klass = klass or self.klass
assert isinstance(result, klass)
assert result == expected
class TestPandasDelegate:
class Delegator:
_properties = ["foo"]
_methods = ["bar"]
def _set_foo(self, value):
self.foo = value
def _get_foo(self):
return self.foo
foo = property(_get_foo, _set_foo, doc="foo property")
def bar(self, *args, **kwargs):
""" a test bar method """
pass
class Delegate(PandasDelegate, PandasObject):
def __init__(self, obj):
self.obj = obj
def setup_method(self, method):
pass
def test_invalid_delegation(self):
# these show that in order for the delegation to work
# the _delegate_* methods need to be overridden to not raise
# a TypeError
self.Delegate._add_delegate_accessors(
delegate=self.Delegator,
accessors=self.Delegator._properties,
typ="property",
)
self.Delegate._add_delegate_accessors(
delegate=self.Delegator, accessors=self.Delegator._methods, typ="method"
)
delegate = self.Delegate(self.Delegator())
with pytest.raises(TypeError):
delegate.foo
with pytest.raises(TypeError):
delegate.foo = 5
with pytest.raises(TypeError):
delegate.foo()
@pytest.mark.skipif(PYPY, reason="not relevant for PyPy")
def test_memory_usage(self):
# Delegate does not implement memory_usage.
# Check that we fall back to in-built `__sizeof__`
# GH 12924
delegate = self.Delegate(self.Delegator())
sys.getsizeof(delegate)
class Ops:
def _allow_na_ops(self, obj):
"""Whether to skip test cases including NaN"""
if (isinstance(obj, Index) and obj.is_boolean()) or not obj._can_hold_na:
# don't test boolean / integer dtypes
return False
return True
def setup_method(self, method):
self.bool_index = tm.makeBoolIndex(10, name="a")
self.int_index = tm.makeIntIndex(10, name="a")
self.float_index = tm.makeFloatIndex(10, name="a")
self.dt_index = tm.makeDateIndex(10, name="a")
self.dt_tz_index = tm.makeDateIndex(10, name="a").tz_localize(tz="US/Eastern")
self.period_index = tm.makePeriodIndex(10, name="a")
self.string_index = tm.makeStringIndex(10, name="a")
self.unicode_index = tm.makeUnicodeIndex(10, name="a")
arr = np.random.randn(10)
self.bool_series = Series(arr, index=self.bool_index, name="a")
self.int_series = Series(arr, index=self.int_index, name="a")
self.float_series = Series(arr, index=self.float_index, name="a")
self.dt_series = Series(arr, index=self.dt_index, name="a")
self.dt_tz_series = self.dt_tz_index.to_series(keep_tz=True)
self.period_series = Series(arr, index=self.period_index, name="a")
self.string_series = Series(arr, index=self.string_index, name="a")
self.unicode_series = Series(arr, index=self.unicode_index, name="a")
types = ["bool", "int", "float", "dt", "dt_tz", "period", "string", "unicode"]
self.indexes = [getattr(self, "{}_index".format(t)) for t in types]
self.series = [getattr(self, "{}_series".format(t)) for t in types]
# To test narrow dtypes, we use narrower *data* elements, not *index* elements
index = self.int_index
self.float32_series = Series(arr.astype(np.float32), index=index, name="a")
arr_int = np.random.choice(10, size=10, replace=False)
self.int8_series = Series(arr_int.astype(np.int8), index=index, name="a")
self.int16_series = Series(arr_int.astype(np.int16), index=index, name="a")
self.int32_series = Series(arr_int.astype(np.int32), index=index, name="a")
self.uint8_series = Series(arr_int.astype(np.uint8), index=index, name="a")
self.uint16_series = Series(arr_int.astype(np.uint16), index=index, name="a")
self.uint32_series = Series(arr_int.astype(np.uint32), index=index, name="a")
nrw_types = ["float32", "int8", "int16", "int32", "uint8", "uint16", "uint32"]
self.narrow_series = [getattr(self, "{}_series".format(t)) for t in nrw_types]
self.objs = self.indexes + self.series + self.narrow_series
def check_ops_properties(self, props, filter=None, ignore_failures=False):
for op in props:
for o in self.is_valid_objs:
# if a filter, skip if it doesn't match
if filter is not None:
filt = o.index if isinstance(o, Series) else o
if not filter(filt):
continue
try:
if isinstance(o, Series):
expected = Series(getattr(o.index, op), index=o.index, name="a")
else:
expected = getattr(o, op)
except (AttributeError):
if ignore_failures:
continue
result = getattr(o, op)
# these could be series, arrays or scalars
if isinstance(result, Series) and isinstance(expected, Series):
tm.assert_series_equal(result, expected)
elif isinstance(result, Index) and isinstance(expected, Index):
tm.assert_index_equal(result, expected)
elif isinstance(result, np.ndarray) and isinstance(
expected, np.ndarray
):
tm.assert_numpy_array_equal(result, expected)
else:
assert result == expected
# freq raises AttributeError on an Int64Index because its not
# defined we mostly care about Series here anyhow
if not ignore_failures:
for o in self.not_valid_objs:
# an object that is datetimelike will raise a TypeError,
# otherwise an AttributeError
err = AttributeError
if issubclass(type(o), DatetimeIndexOpsMixin):
err = TypeError
with pytest.raises(err):
getattr(o, op)
@pytest.mark.parametrize("klass", [Series, DataFrame])
def test_binary_ops_docs(self, klass):
op_map = {
"add": "+",
"sub": "-",
"mul": "*",
"mod": "%",
"pow": "**",
"truediv": "/",
"floordiv": "//",
}
for op_name in op_map:
operand1 = klass.__name__.lower()
operand2 = "other"
op = op_map[op_name]
expected_str = " ".join([operand1, op, operand2])
assert expected_str in getattr(klass, op_name).__doc__
# reverse version of the binary ops
expected_str = " ".join([operand2, op, operand1])
assert expected_str in getattr(klass, "r" + op_name).__doc__
class TestIndexOps(Ops):
def setup_method(self, method):
super().setup_method(method)
self.is_valid_objs = self.objs
self.not_valid_objs = []
def test_none_comparison(self):
# bug brought up by #1079
# changed from TypeError in 0.17.0
for o in self.is_valid_objs:
if isinstance(o, Series):
o[0] = np.nan
# noinspection PyComparisonWithNone
result = o == None # noqa
assert not result.iat[0]
assert not result.iat[1]
# noinspection PyComparisonWithNone
result = o != None # noqa
assert result.iat[0]
assert result.iat[1]
result = None == o # noqa
assert not result.iat[0]
assert not result.iat[1]
result = None != o # noqa
assert result.iat[0]
assert result.iat[1]
if is_datetime64_dtype(o) or is_datetime64tz_dtype(o):
# Following DatetimeIndex (and Timestamp) convention,
# inequality comparisons with Series[datetime64] raise
with pytest.raises(TypeError):
None > o
with pytest.raises(TypeError):
o > None
else:
result = None > o
assert not result.iat[0]
assert not result.iat[1]
result = o < None
assert not result.iat[0]
assert not result.iat[1]
def test_ndarray_compat_properties(self):
for o in self.objs:
# Check that we work.
for p in ["shape", "dtype", "T", "nbytes"]:
assert getattr(o, p, None) is not None
# deprecated properties
for p in ["flags", "strides", "itemsize"]:
with tm.assert_produces_warning(FutureWarning):
assert getattr(o, p, None) is not None
with tm.assert_produces_warning(FutureWarning):
assert hasattr(o, "base")
# If we have a datetime-like dtype then needs a view to work
# but the user is responsible for that
try:
with tm.assert_produces_warning(FutureWarning):
assert o.data is not None
except ValueError:
pass
with pytest.raises(ValueError):
with tm.assert_produces_warning(FutureWarning):
o.item() # len > 1
assert o.ndim == 1
assert o.size == len(o)
with tm.assert_produces_warning(FutureWarning):
assert Index([1]).item() == 1
assert Series([1]).item() == 1
def test_value_counts_unique_nunique(self):
for orig in self.objs:
o = orig.copy()
klass = type(o)
values = o._values
if isinstance(values, Index):
# reset name not to affect latter process
values.name = None
# create repeated values, 'n'th element is repeated by n+1 times
# skip boolean, because it only has 2 values at most
if isinstance(o, Index) and o.is_boolean():
continue
elif isinstance(o, Index):
expected_index = Index(o[::-1])
expected_index.name = None
o = o.repeat(range(1, len(o) + 1))
o.name = "a"
else:
expected_index = Index(values[::-1])
idx = o.index.repeat(range(1, len(o) + 1))
# take-based repeat
indices = np.repeat(np.arange(len(o)), range(1, len(o) + 1))
rep = values.take(indices)
o = klass(rep, index=idx, name="a")
# check values has the same dtype as the original
assert o.dtype == orig.dtype
expected_s = Series(
range(10, 0, -1), index=expected_index, dtype="int64", name="a"
)
result = o.value_counts()
tm.assert_series_equal(result, expected_s)
assert result.index.name is None
assert result.name == "a"
result = o.unique()
if isinstance(o, Index):
assert isinstance(result, o.__class__)
tm.assert_index_equal(result, orig)
assert result.dtype == orig.dtype
elif is_datetime64tz_dtype(o):
# datetimetz Series returns array of Timestamp
assert result[0] == orig[0]
for r in result:
assert isinstance(r, Timestamp)
tm.assert_numpy_array_equal(
result.astype(object), orig._values.astype(object)
)
else:
tm.assert_numpy_array_equal(result, orig.values)
assert result.dtype == orig.dtype
assert o.nunique() == len(np.unique(o.values))
@pytest.mark.parametrize("null_obj", [np.nan, None])
def test_value_counts_unique_nunique_null(self, null_obj):
for orig in self.objs:
o = orig.copy()
klass = type(o)
values = o._ndarray_values
if not self._allow_na_ops(o):
continue
# special assign to the numpy array
if is_datetime64tz_dtype(o):
if isinstance(o, DatetimeIndex):
v = o.asi8
v[0:2] = iNaT
values = o._shallow_copy(v)
else:
o = o.copy()
o[0:2] = pd.NaT
values = o._values
elif needs_i8_conversion(o):
values[0:2] = iNaT
values = o._shallow_copy(values)
else:
values[0:2] = null_obj
# check values has the same dtype as the original
assert values.dtype == o.dtype
# create repeated values, 'n'th element is repeated by n+1
# times
if isinstance(o, (DatetimeIndex, PeriodIndex)):
expected_index = o.copy()
expected_index.name = None
# attach name to klass
o = klass(values.repeat(range(1, len(o) + 1)))
o.name = "a"
else:
if isinstance(o, DatetimeIndex):
expected_index = orig._values._shallow_copy(values)
else:
expected_index = Index(values)
expected_index.name = None
o = o.repeat(range(1, len(o) + 1))
o.name = "a"
# check values has the same dtype as the original
assert o.dtype == orig.dtype
# check values correctly have NaN
nanloc = np.zeros(len(o), dtype=bool)
nanloc[:3] = True
if isinstance(o, Index):
tm.assert_numpy_array_equal(pd.isna(o), nanloc)
else:
exp = Series(nanloc, o.index, name="a")
tm.assert_series_equal(pd.isna(o), exp)
expected_s_na = Series(
list(range(10, 2, -1)) + [3],
index=expected_index[9:0:-1],
dtype="int64",
name="a",
)
expected_s = Series(
list(range(10, 2, -1)),
index=expected_index[9:1:-1],
dtype="int64",
name="a",
)
result_s_na = o.value_counts(dropna=False)
tm.assert_series_equal(result_s_na, expected_s_na)
assert result_s_na.index.name is None
assert result_s_na.name == "a"
result_s = o.value_counts()
tm.assert_series_equal(o.value_counts(), expected_s)
assert result_s.index.name is None
assert result_s.name == "a"
result = o.unique()
if isinstance(o, Index):
tm.assert_index_equal(result, Index(values[1:], name="a"))
elif is_datetime64tz_dtype(o):
# unable to compare NaT / nan
tm.assert_extension_array_equal(result[1:], values[2:])
assert result[0] is pd.NaT
else:
tm.assert_numpy_array_equal(result[1:], values[2:])
assert pd.isna(result[0])
assert result.dtype == orig.dtype
assert o.nunique() == 8
assert o.nunique(dropna=False) == 9
@pytest.mark.parametrize("klass", [Index, Series])
def test_value_counts_inferred(self, klass):
s_values = ["a", "b", "b", "b", "b", "c", "d", "d", "a", "a"]
s = klass(s_values)
expected = Series([4, 3, 2, 1], index=["b", "a", "d", "c"])
tm.assert_series_equal(s.value_counts(), expected)
if isinstance(s, Index):
exp = Index(np.unique(np.array(s_values, dtype=np.object_)))
tm.assert_index_equal(s.unique(), exp)
else:
exp = np.unique(np.array(s_values, dtype=np.object_))
tm.assert_numpy_array_equal(s.unique(), exp)
assert s.nunique() == 4
# don't sort, have to sort after the fact as not sorting is
# platform-dep
hist = s.value_counts(sort=False).sort_values()
expected = Series([3, 1, 4, 2], index=list("acbd")).sort_values()
tm.assert_series_equal(hist, expected)
# sort ascending
hist = s.value_counts(ascending=True)
expected = Series([1, 2, 3, 4], index=list("cdab"))
tm.assert_series_equal(hist, expected)
# relative histogram.
hist = s.value_counts(normalize=True)
expected = Series([0.4, 0.3, 0.2, 0.1], index=["b", "a", "d", "c"])
tm.assert_series_equal(hist, expected)
@pytest.mark.parametrize("klass", [Index, Series])
def test_value_counts_bins(self, klass):
s_values = ["a", "b", "b", "b", "b", "c", "d", "d", "a", "a"]
s = klass(s_values)
# bins
with pytest.raises(TypeError):
s.value_counts(bins=1)
s1 = Series([1, 1, 2, 3])
res1 = s1.value_counts(bins=1)
exp1 = Series({Interval(0.997, 3.0): 4})
tm.assert_series_equal(res1, exp1)
res1n = s1.value_counts(bins=1, normalize=True)
exp1n = Series({Interval(0.997, 3.0): 1.0})
tm.assert_series_equal(res1n, exp1n)
if isinstance(s1, Index):
tm.assert_index_equal(s1.unique(), Index([1, 2, 3]))
else:
exp = np.array([1, 2, 3], dtype=np.int64)
tm.assert_numpy_array_equal(s1.unique(), exp)
assert s1.nunique() == 3
# these return the same
res4 = s1.value_counts(bins=4, dropna=True)
intervals = IntervalIndex.from_breaks([0.997, 1.5, 2.0, 2.5, 3.0])
exp4 = Series([2, 1, 1, 0], index=intervals.take([0, 3, 1, 2]))
tm.assert_series_equal(res4, exp4)
res4 = s1.value_counts(bins=4, dropna=False)
intervals = IntervalIndex.from_breaks([0.997, 1.5, 2.0, 2.5, 3.0])
exp4 = Series([2, 1, 1, 0], index=intervals.take([0, 3, 1, 2]))
tm.assert_series_equal(res4, exp4)
res4n = s1.value_counts(bins=4, normalize=True)
exp4n = Series([0.5, 0.25, 0.25, 0], index=intervals.take([0, 3, 1, 2]))
tm.assert_series_equal(res4n, exp4n)
# handle NA's properly
s_values = ["a", "b", "b", "b", np.nan, np.nan, "d", "d", "a", "a", "b"]
s = klass(s_values)
expected = Series([4, 3, 2], index=["b", "a", "d"])
tm.assert_series_equal(s.value_counts(), expected)
if isinstance(s, Index):
exp = Index(["a", "b", np.nan, "d"])
tm.assert_index_equal(s.unique(), exp)
else:
exp = np.array(["a", "b", np.nan, "d"], dtype=object)
tm.assert_numpy_array_equal(s.unique(), exp)
assert s.nunique() == 3
s = klass({})
expected = Series([], dtype=np.int64)
tm.assert_series_equal(s.value_counts(), expected, check_index_type=False)
# returned dtype differs depending on original
if isinstance(s, Index):
tm.assert_index_equal(s.unique(), Index([]), exact=False)
else:
tm.assert_numpy_array_equal(s.unique(), np.array([]), check_dtype=False)
assert s.nunique() == 0
@pytest.mark.parametrize("klass", [Index, Series])
def test_value_counts_datetime64(self, klass):
# GH 3002, datetime64[ns]
# don't test names though
txt = "\n".join(
[
"xxyyzz20100101PIE",
"xxyyzz20100101GUM",
"xxyyzz20100101EGG",
"xxyyww20090101EGG",
"foofoo20080909PIE",
"foofoo20080909GUM",
]
)
f = StringIO(txt)
df = pd.read_fwf(
f, widths=[6, 8, 3], names=["person_id", "dt", "food"], parse_dates=["dt"]
)
s = klass(df["dt"].copy())
s.name = None
idx = pd.to_datetime(
["2010-01-01 00:00:00", "2008-09-09 00:00:00", "2009-01-01 00:00:00"]
)
expected_s = Series([3, 2, 1], index=idx)
tm.assert_series_equal(s.value_counts(), expected_s)
expected = np_array_datetime64_compat(
["2010-01-01 00:00:00", "2009-01-01 00:00:00", "2008-09-09 00:00:00"],
dtype="datetime64[ns]",
)
if isinstance(s, Index):
tm.assert_index_equal(s.unique(), DatetimeIndex(expected))
else:
tm.assert_numpy_array_equal(s.unique(), expected)
assert s.nunique() == 3
# with NaT
s = df["dt"].copy()
s = klass(list(s.values) + [pd.NaT])
result = s.value_counts()
assert result.index.dtype == "datetime64[ns]"
tm.assert_series_equal(result, expected_s)
result = s.value_counts(dropna=False)
expected_s[pd.NaT] = 1
tm.assert_series_equal(result, expected_s)
unique = s.unique()
assert unique.dtype == "datetime64[ns]"
# numpy_array_equal cannot compare pd.NaT
if isinstance(s, Index):
exp_idx = DatetimeIndex(expected.tolist() + [pd.NaT])
tm.assert_index_equal(unique, exp_idx)
else:
tm.assert_numpy_array_equal(unique[:3], expected)
assert pd.isna(unique[3])
assert s.nunique() == 3
assert s.nunique(dropna=False) == 4
# timedelta64[ns]
td = df.dt - df.dt + timedelta(1)
td = klass(td, name="dt")
result = td.value_counts()
expected_s = Series([6], index=[Timedelta("1day")], name="dt")
tm.assert_series_equal(result, expected_s)
expected = TimedeltaIndex(["1 days"], name="dt")
if isinstance(td, Index):
tm.assert_index_equal(td.unique(), expected)
else:
tm.assert_numpy_array_equal(td.unique(), expected.values)
td2 = timedelta(1) + (df.dt - df.dt)
td2 = klass(td2, name="dt")
result2 = td2.value_counts()
tm.assert_series_equal(result2, expected_s)
def test_factorize(self):
for orig in self.objs:
o = orig.copy()
if isinstance(o, Index) and o.is_boolean():
exp_arr = np.array([0, 1] + [0] * 8, dtype=np.intp)
exp_uniques = Index([False, True])
else:
exp_arr = np.array(range(len(o)), dtype=np.intp)
exp_uniques = o
codes, uniques = o.factorize()
tm.assert_numpy_array_equal(codes, exp_arr)
if isinstance(o, Series):
tm.assert_index_equal(uniques, Index(orig), check_names=False)
else:
# factorize explicitly resets name
tm.assert_index_equal(uniques, exp_uniques, check_names=False)
def test_factorize_repeated(self):
for orig in self.objs:
o = orig.copy()
# don't test boolean
if isinstance(o, Index) and o.is_boolean():
continue
# sort by value, and create duplicates
if isinstance(o, Series):
o = o.sort_values()
n = o.iloc[5:].append(o)
else:
indexer = o.argsort()
o = o.take(indexer)
n = o[5:].append(o)
exp_arr = np.array(
[5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=np.intp
)
codes, uniques = n.factorize(sort=True)
tm.assert_numpy_array_equal(codes, exp_arr)
if isinstance(o, Series):
tm.assert_index_equal(
uniques, Index(orig).sort_values(), check_names=False
)
else:
tm.assert_index_equal(uniques, o, check_names=False)
exp_arr = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4], np.intp)
codes, uniques = n.factorize(sort=False)
tm.assert_numpy_array_equal(codes, exp_arr)
if isinstance(o, Series):
expected = Index(o.iloc[5:10].append(o.iloc[:5]))
tm.assert_index_equal(uniques, expected, check_names=False)
else:
expected = o[5:10].append(o[:5])
tm.assert_index_equal(uniques, expected, check_names=False)
def test_duplicated_drop_duplicates_index(self):
# GH 4060
for original in self.objs:
if isinstance(original, Index):
# special case
if original.is_boolean():
result = original.drop_duplicates()
expected = Index([False, True], name="a")
tm.assert_index_equal(result, expected)
continue
# original doesn't have duplicates
expected = np.array([False] * len(original), dtype=bool)
duplicated = original.duplicated()
tm.assert_numpy_array_equal(duplicated, expected)
assert duplicated.dtype == bool
result = original.drop_duplicates()
tm.assert_index_equal(result, original)
assert result is not original
# has_duplicates
assert not original.has_duplicates
# create repeated values, 3rd and 5th values are duplicated
idx = original[list(range(len(original))) + [5, 3]]
expected = np.array([False] * len(original) + [True, True], dtype=bool)
duplicated = idx.duplicated()
tm.assert_numpy_array_equal(duplicated, expected)
assert duplicated.dtype == bool
tm.assert_index_equal(idx.drop_duplicates(), original)
base = [False] * len(idx)
base[3] = True
base[5] = True
expected = np.array(base)
duplicated = idx.duplicated(keep="last")
tm.assert_numpy_array_equal(duplicated, expected)
assert duplicated.dtype == bool
result = idx.drop_duplicates(keep="last")
tm.assert_index_equal(result, idx[~expected])
base = [False] * len(original) + [True, True]
base[3] = True
base[5] = True
expected = np.array(base)
duplicated = idx.duplicated(keep=False)
tm.assert_numpy_array_equal(duplicated, expected)
assert duplicated.dtype == bool
result = idx.drop_duplicates(keep=False)
tm.assert_index_equal(result, idx[~expected])
with pytest.raises(
TypeError,
match=(
r"drop_duplicates\(\) got an " r"unexpected keyword argument"
),
):
idx.drop_duplicates(inplace=True)
else:
expected = Series(
[False] * len(original), index=original.index, name="a"
)
tm.assert_series_equal(original.duplicated(), expected)
result = original.drop_duplicates()
tm.assert_series_equal(result, original)
assert result is not original
idx = original.index[list(range(len(original))) + [5, 3]]
values = original._values[list(range(len(original))) + [5, 3]]
s = Series(values, index=idx, name="a")
expected = Series(
[False] * len(original) + [True, True], index=idx, name="a"
)
tm.assert_series_equal(s.duplicated(), expected)
tm.assert_series_equal(s.drop_duplicates(), original)
base = [False] * len(idx)
base[3] = True
base[5] = True
expected = Series(base, index=idx, name="a")
tm.assert_series_equal(s.duplicated(keep="last"), expected)
tm.assert_series_equal(
s.drop_duplicates(keep="last"), s[~np.array(base)]
)
base = [False] * len(original) + [True, True]
base[3] = True
base[5] = True
expected = Series(base, index=idx, name="a")
tm.assert_series_equal(s.duplicated(keep=False), expected)
tm.assert_series_equal(
s.drop_duplicates(keep=False), s[~np.array(base)]
)
s.drop_duplicates(inplace=True)
tm.assert_series_equal(s, original)
def test_drop_duplicates_series_vs_dataframe(self):
# GH 14192
df = pd.DataFrame(
{
"a": [1, 1, 1, "one", "one"],
"b": [2, 2, np.nan, np.nan, np.nan],
"c": [3, 3, np.nan, np.nan, "three"],
"d": [1, 2, 3, 4, 4],
"e": [
datetime(2015, 1, 1),
datetime(2015, 1, 1),
datetime(2015, 2, 1),
pd.NaT,
pd.NaT,
],
}
)
for column in df.columns:
for keep in ["first", "last", False]:
dropped_frame = df[[column]].drop_duplicates(keep=keep)
dropped_series = df[column].drop_duplicates(keep=keep)
tm.assert_frame_equal(dropped_frame, dropped_series.to_frame())
def test_fillna(self):
# # GH 11343
# though Index.fillna and Series.fillna has separate impl,
# test here to confirm these works as the same
for orig in self.objs:
o = orig.copy()
values = o.values
# values will not be changed
result = o.fillna(o.astype(object).values[0])
if isinstance(o, Index):
tm.assert_index_equal(o, result)
else:
tm.assert_series_equal(o, result)
# check shallow_copied
assert o is not result
for null_obj in [np.nan, None]:
for orig in self.objs:
o = orig.copy()
klass = type(o)
if not self._allow_na_ops(o):
continue
if needs_i8_conversion(o):
values = o.astype(object).values
fill_value = values[0]
values[0:2] = pd.NaT
else:
values = o.values.copy()
fill_value = o.values[0]
values[0:2] = null_obj
expected = [fill_value] * 2 + list(values[2:])
expected = klass(expected, dtype=orig.dtype)
o = klass(values)
# check values has the same dtype as the original
assert o.dtype == orig.dtype
result = o.fillna(fill_value)
if isinstance(o, Index):
tm.assert_index_equal(result, expected)
else:
tm.assert_series_equal(result, expected)
# check shallow_copied
assert o is not result
@pytest.mark.skipif(PYPY, reason="not relevant for PyPy")
def test_memory_usage(self):
for o in self.objs:
res = o.memory_usage()
res_deep = o.memory_usage(deep=True)
if is_object_dtype(o) or (
isinstance(o, Series) and is_object_dtype(o.index)
import datacube
import math
import calendar
import ipywidgets
import numpy as np
import matplotlib as mpl
import matplotlib.cm as cm
from matplotlib import colors as mcolours
import matplotlib.patheffects as PathEffects
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from datetime import datetime,date, timedelta
from pyproj import Proj, transform
from datacube.utils import geometry
from datacube.utils.geometry import CRS,Geometry
from datacube.model import Range
import shapely
from shapely.geometry import shape
import fiona
import rasterio.features
from fiona.crs import from_epsg
import os,sys
# from hdstats import nangeomedian_pcm
from tqdm import tqdm
import pandas as pd
import gdal
#from utils import *
import fnmatch
import csv
import json
import stat
import types
from glob import glob
import shutil
import time
import zipfile
import requests
import logging
import hashlib
import re
import socket
from dateutil.relativedelta import relativedelta
import operator
sys.path.append('/home/noa/.snap/snap-python')
import snappy
from snappy import GPF
from snappy import ProductIO
from snappy import HashMap
from snappy import jpy
import subprocess
from snappy import WKTReader
from snappy import File
from snappy import ProgressMonitor
from time import *
import datetime as dt
from osgeo import osr,ogr
def change_colnames(df):
cols = ["id"] + [x.split("_")[1] + "_" + str(datetime.strptime(x.split("_")[0], '%B%d%Y').timetuple().tm_yday) for x in df.columns[1:]]
return cols
def doy_to_date(doy, year):
return datetime.strptime(str(doy) + year, '%j%Y')
def get_band(df, band_name, with_id=False):
bands = [x for x in df.columns if band_name in x]
if with_id:
bands = ["id"] + bands
return df.loc[:, bands]
def simple_daily_interpolation(df, name, start_doy, end_doy, year='2021', interp_method='linear', ):
n_cols = df.shape[1]
df.index = df.index.astype(int)
df.columns = [pd.to_datetime(x.split("_")[-1]) for x in df]
date_of_doy = doy_to_date(end_doy, year)
if df.columns[-1] < date_of_doy:
df[date_of_doy] = np.nan
dfT = df.T
dfT = dfT.resample('1d').asfreq()
df_daily = dfT.T.interpolate(interp_method, axis=1).ffill(axis=1).bfill(axis=1)
# df_daily.columns = df_daily.columns.map(lambda t: "{}_{}".format(name, t.timetuple().tm_yday))
df_daily = df_daily[df.columns]
df_daily.columns = df_daily.columns.map(lambda t: "{}_mean_{}".format(name, t.date()))
return df_daily
def daily_fs(fs, year, start_doy, end_doy, bandnames, s1 = False, s1_names = [], has_id=False, keep_init = True):
band_list = []
# print("Interpolation...")
for b in tqdm(bandnames):
band_df = get_band(fs, b)
band_df = simple_daily_interpolation(band_df, b, start_doy, end_doy, year)
cols = [x for x in band_df.columns if start_doy <= pd.to_datetime(x.split("_")[-1]).dayofyear <= end_doy]
band_df = band_df[cols]
band_list.append(band_df)
if s1:
band_list.append(fs.filter(regex = 'vv|vh'))
fs_daily = pd.concat(band_list, axis=1, join='inner')
if has_id:
fs_daily.insert(0, 'id', fs["id"])
return fs_daily
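# Illustrative sketch of the interpolation helpers above on a tiny, made-up feature space.
# Column names follow the '<band>_mean_<date>' convention produced by the feature-space
# builders further down in this module; the dates and values are invented for demonstration only.
def _example_daily_interpolation():
    fs = pd.DataFrame(
        {
            "ndvi_mean_2021-03-05": [0.20, 0.30],
            "ndvi_mean_2021-03-20": [0.40, 0.50],
            "ndvi_mean_2021-04-10": [0.60, 0.70],
        },
        index=[1, 2],
    )
    # Interpolate to daily frequency, then keep acquisitions between day-of-year 70 and 100 of 2021.
    return daily_fs(fs, year="2021", start_doy=70, end_doy=100, bandnames=["ndvi"])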
def show_options():
classification_type = ipywidgets.Select(
options=['Pixel-Based', 'Object-Based'],
description='Type',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True
)
sentinel1 = ipywidgets.Checkbox(description='Use Sentinel-1',)
sentinel2 = ipywidgets.Checkbox(description='Use Sentinel-2',)
text1 = ipywidgets.Text(description='Shapefile Path',)
text2 = ipywidgets.Text(description='Name of "ID" column',)
text3 = ipywidgets.Text(description='Name of "Crop Type" column',)
box = ipywidgets.VBox([classification_type,sentinel1,sentinel2,text1,text2,text3])
return box
def get_dates(timeStart,timeEnd):
query = {
'time': (timeStart,timeEnd),
'product': 's2_preprocessed_v2',
}
dc = datacube.Datacube(app="test", config="/home/noa/datacube.conf")
bands = ['B02']#,'B03','B04','B05','B06','B07','B08','B8A','B11','B12','ndvi','ndwi','psri']
data = dc.load(**query,dask_chunks={})
dates = []
for day in list(data.time.values):
a = str(day-np.timedelta64(1,'D'))[:10]
b = str(day+np.timedelta64(1,'D'))[:10]
dates.append((a,b))
return dates
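# Minimal usage sketch for get_dates. It assumes the datacube config referenced above and the
# 's2_preprocessed_v2' product are available on this system; the date range is arbitrary.
def _example_list_s2_dates():
    for start, end in get_dates("2017-03-01", "2017-04-01"):
        # each tuple brackets one Sentinel-2 acquisition by +/- one day
        print(start, end)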
def generate_feature_space_preload(filename,outfile_dir,colID,
timeStart,timeEnd,samples = None,
classficationType='object',sentinel1=True,sentinel2=True):
'''
Generates a pixel-based or object-based feature space in the format of csv
:param filename: the full path of the shapefile
:param outfile_dir: the directory where the per-date feature space csv files are written
:param colID: the column name that holds the id for each parcel
:param timeStart: starting date of acquisitions in the YYYY-MM-DD format
:param timeEnd: ending date of acquisitions in the YYYY-MM-DD format
:param classficationType: whether the classification is pixel-based or object-based. Valid values 'o' or 'p'
:param sentinel1: boolean value for generating or not features based on sentinel-1 products
:param sentinel2: boolean value for generating or not features based on sentinel-2 products
'''
bands_indices = [0,1,2,3,4,5,6,7,8,9,11,12,13]
if filename is None:
sys.exit('No filename has been given')
if colID is None:
sys.exit('No column for ID has been given')
# if colType is None:
# sys.exit('No column for crop type has been given')
if not os.path.exists(filename):
sys.exit('File does not exist')
dates = get_dates(timeStart, timeEnd)
for day in dates:
stime = time()
query = {
'time': (day[0], day[1]),
'product': 's2_preprocessed_v2',
}
dc = datacube.Datacube(app="test", config="/home/noa/datacube.conf")
bands = ['B02','B03','B04','B05','B06','B07','B08','B8A','B11','B12','ndvi','ndwi','psri']
#print("Loading data for range {}...Patience is a great asset!".format(day[0]+' to '+day[1]))
data = dc.load(**query)
data['ndvi'] = calculate_index(data,'ndvi')
data['ndwi'] = calculate_index(data,'ndmi')
data['psri'] = calculate_index(data,'psri')
for index in bands:
data[index] = data[index].where(((data['SCL']>=3) & (data['SCL']<=7)), np.nan)
outfile = outfile_dir + 'fs' + str(data.time.values[0])[:10] + '.csv'
data = data.to_array()
data.loc['SCL'] = (data.loc['SCL']>=3) & (data.loc['SCL']<7)
#print("Data has been loaded")
cloud_free_ratio = data[10].values.sum() / (data.shape[2]*data.shape[3])
if cloud_free_ratio < 0.05:
print("Cloud coverage for {} is {}%, thus this date is skipped.".format(str(pd.to_datetime(data.time[0].values).date()), 100-cloud_free_ratio*100))
continue
if sentinel2:
ras = gdal.Open('/data2/netherlands/s2/2017/31UFT_clipped/03/27/March272017_B02.tif')
gt = ras.GetGeoTransform()
inv_gt = gdal.InvGeoTransform(gt)
srs = osr.SpatialReference()
srs.ImportFromEPSG(3857)
driver = ogr.GetDriverByName("ESRI Shapefile")
dataSource = driver.Open(filename, 0)
ds = dataSource.GetLayer()
parcels = {}
iterations = 0
for f in tqdm(ds):
geom = f.GetGeometryRef()
geom = geom.ExportToWkt()
vect_tmp_drv = ogr.GetDriverByName('MEMORY')
vect_tmp_src = vect_tmp_drv.CreateDataSource('')
vect_tmp_lyr = vect_tmp_src.CreateLayer('', srs, ogr.wkbPolygon)
vect_tmp_lyr.CreateField(ogr.FieldDefn("id", ogr.OFTInteger))
feat = ogr.Feature(vect_tmp_lyr.GetLayerDefn())
feat.SetField("id", f['id'])
feat_geom = ogr.CreateGeometryFromWkt(geom)
feat.SetGeometry(feat_geom)
vect_tmp_lyr.CreateFeature(feat)
xmin, xmax, ymin, ymax = feat_geom.GetEnvelope()
off_ulx, off_uly = map(int, gdal.ApplyGeoTransform(inv_gt, xmin, ymax))
off_lrx, off_lry = map(int, gdal.ApplyGeoTransform(inv_gt, xmax, ymin))
# Specify offset and rows and columns to read
rows, columns = (off_lry - off_uly) + 1, (off_lrx - off_ulx) + 1
aa = off_uly
bb = off_lry + 1
cc = off_ulx
dd = off_lrx + 1
iterations += 1
if samples is not None and samples == iterations:
break
parcels[f[colID]] = {}
values = [f[colID]]
if data is None or geom is None:
continue
ras_tmp = gdal.GetDriverByName('MEM').Create('', columns, rows, 1, gdal.GDT_Byte)
ras_tmp.SetProjection(ras.GetProjection())
ras_gt = list(gt)
ras_gt[0], ras_gt[3] = gdal.ApplyGeoTransform(gt, off_ulx, off_uly)
ras_tmp.SetGeoTransform(ras_gt)
gdal.RasterizeLayer(ras_tmp, [1], vect_tmp_lyr, burn_values=[1])
mask = ras_tmp.GetRasterBand(1).ReadAsArray()
parcel_data = data[:,0,aa:bb,cc:dd].where(mask)
parcel_size = mask.sum()
cloudfree_parcel_size = parcel_data[10].values.sum()
cloud_free_ratio = cloudfree_parcel_size / (parcel_size + 1e-7)
if cloud_free_ratio < 0.3:  # skip parcels with less than 30% cloud-free pixels
values.extend([np.nan for _ in range(len(bands))])
else:
values.extend(parcel_data[bands_indices].mean(axis = (1,2)).round(3).values)
if len(values)==1:
continue
parcels[f[colID]] = values
headings = [colID] + [(band+'_mean_'+str(pd.to_datetime(data.time[0].values).date())) for band in bands]
df = pd.DataFrame.from_dict(parcels, orient='index')
df.to_csv(outfile,header=headings,index=False)
print("Time elapsed for creating feature space for {} is {}s".format(str(pd.to_datetime(data.time[0].values).date()), stime-time()))
def generate_feature_space_preload_indices(dates,filename,outfile,colID,colType,timeStart,timeEnd,samples = None,classficationType='object',sentinel1=True,sentinel2=True):
'''
Generates a pixel-based or object-based feature space in the format of csv
:param filename: the full path of the shapefile
:param outfile: the full path of the feature space csv to be generated
:param colID: the column name that holds the id for each parcel
:param colType: the column name that holds the crop code for each parcel
:param timeStart: starting date of acquisitions in the YYYY-MM-DD format
:param timeEnd: ending date of acquisitions in the YYYY-MM-DD format
:param classficationType: whether the classification is pixel-based or object-based. Valid values 'o' or 'p'
:param sentinel1: boolean value for generating or not features based on sentinel-1 products
:param sentinel2: boolean value for generating or not features based on sentinel-2 products
'''
from osgeo import osr,ogr
if filename is None:
sys.exit('No filename has been given')
if colID is None:
sys.exit('No column for ID has been given')
if colType is None:
sys.exit('No column for crop type has been given')
if not os.path.exists(filename):
sys.exit('File does not exist')
for day in dates:
query = {
'time': (day[0], day[1]),
'product': 's2_preprocessed_v2',
}
dc = datacube.Datacube(app="test", config="/home/noa/datacube.conf")
bands = ['ndvi','ndwi','psri']
print("Loading data for range {}...please wait approximately 3-7 minutes".format(day[0]+'to'+day[1]))
data = dc.load(**query)
print("Data has been loaded")
data['ndvi'] = calculate_index(data,'ndvi')
data['ndwi'] = calculate_index(data,'ndmi')
data['psri'] = calculate_index(data,'psri')
outfile = outfile + str(data.time.values[0])[:10] + '.csv'
if sentinel2:
ras = gdal.Open('/data2/netherlands/s2/2017/31UFT/03/24/March242017_B05.tif')
gt = ras.GetGeoTransform()
inv_gt = gdal.InvGeoTransform(gt)
srs = osr.SpatialReference()
srs.ImportFromEPSG(3857)
driver = ogr.GetDriverByName("ESRI Shapefile")
dataSource = driver.Open(filename, 0)
ds = dataSource.GetLayer()
parcels = {}
headings = [colID]
iterations = 0
parcel_data = {}
for f in tqdm(ds):
if iterations == 1000:
break
id = int(f['id'])
geom = f.GetGeometryRef()
geom = geom.ExportToWkt()
vect_tmp_drv = ogr.GetDriverByName('MEMORY')
vect_tmp_src = vect_tmp_drv.CreateDataSource('')
vect_tmp_lyr = vect_tmp_src.CreateLayer('', srs, ogr.wkbPolygon)
vect_tmp_lyr.CreateField(ogr.FieldDefn("id", ogr.OFTInteger))
feat = ogr.Feature(vect_tmp_lyr.GetLayerDefn())
feat.SetField("id", id)
feat_geom = ogr.CreateGeometryFromWkt(geom)
feat.SetGeometry(feat_geom)
vect_tmp_lyr.CreateFeature(feat)
xmin, xmax, ymin, ymax = feat_geom.GetEnvelope()
off_ulx, off_uly = map(int, gdal.ApplyGeoTransform(inv_gt, xmin, ymax))
off_lrx, off_lry = map(int, gdal.ApplyGeoTransform(inv_gt, xmax, ymin))
rows, columns = (off_lry - off_uly) + 1, (off_lrx - off_ulx) + 1
ras_tmp = gdal.GetDriverByName('MEM').Create('', columns, rows, 1, gdal.GDT_Byte)
ras_tmp.SetProjection(ras.GetProjection())
ras_gt = list(gt)
ras_gt[0], ras_gt[3] = gdal.ApplyGeoTransform(gt, off_ulx, off_uly)
ras_tmp.SetGeoTransform(ras_gt)
gdal.RasterizeLayer(ras_tmp, [1], vect_tmp_lyr, burn_values=[1])
mask = ras_tmp.GetRasterBand(1).ReadAsArray()
aa = off_uly
bb = off_lry + 1
cc = off_ulx
dd = off_lrx + 1
iterations += 1
if samples is not None and samples == iterations:
break
parcels[f[colID]] = {}
values = [f[colID]]
if data is None or geom is None:
continue
for band in bands:
if band == "SCL":
continue
for i in range(data[band].shape[0]):
if iterations == 1:
headings.append(band+'_'+str(data.time[i].values).split('.')[0][:10])
try:
# mask cloudy/invalid pixels (SCL outside the 3..6 range) inside the parcel window before averaging
scl_window = data['SCL'][i].values[aa:bb,cc:dd]
band_window = data[band][i].values[aa:bb,cc:dd]
band_window[np.logical_or(scl_window < 3, scl_window >= 7)] = np.nan
if np.all(np.isnan(band_window)):
values.append(np.nan)
else:
values.append(round(np.nanmean(band_window),3))
except Exception as e:
print(e)
values.append(np.nan)
if iterations == 1:
headings.append('CropType')
values.append(f[colType])
parcels[f[colID]] = values
df = pd.DataFrame.from_dict(parcels, orient='index')
df.to_csv(outfile,header=headings,index = False)
def generate_feature_space_backscatter(filename,outfile,colID,colType,timeStart,timeEnd,samples=None,classficationType='object'):
'''
Generates a pixel-based or object-based feature space in the format of csv
:param filename: the full path of the shapefile
:param outfile: the full path of the feature space csv to be generated
:param colID: the column name that holds the id for each parcel
:param colType: the column name that holds the crop code for each parcel
:param timeStart: starting date of acquisitions in the YYYY-MM-DD format
:param timeEnd: ending date of acquisitions in the YYYY-MM-DD format
:param classficationType: whether the classification is pixel-based or object-based. Valid values 'o' or 'p'
'''
from osgeo import osr,ogr
if filename is None:
sys.exit('No filename has been given')
if colID is None:
sys.exit('No column for ID has been given')
if colType is None:
sys.exit('No column for crop type has been given')
if not os.path.exists(filename):
sys.exit('File does not exist')
query = {
'time': (timeStart, timeEnd),
'product': 'sentinel1',
}
dc = datacube.Datacube(app="test", config="/home/noa/datacube.conf")
bands = ['vv','vh']
print("Loading data for range {}...please wait approximately 3-7 minutes".format(timeStart+' to '+timeEnd))
data = dc.load(**query)
print("Data has been loaded")
if(len(data)==0):
print("Empty data")
return
ws = outfile
outfile = outfile + 'fs_sar_' + str(timeStart) + '_to_' + str(timeEnd) + '.csv'
if True:
for rasterfile in os.listdir(os.path.join(ws,'s1',timeStart[:4],'backscatter')):
if '.tif' in rasterfile:
basemap = rasterfile
break
if basemap is None:
return
print("Using ", basemap, " as basemap image")
ras = gdal.Open(os.path.join(ws,'s1',timeStart[:4],'backscatter',basemap))
gt = ras.GetGeoTransform()
inv_gt = gdal.InvGeoTransform(gt)
srs = osr.SpatialReference()
srs.ImportFromEPSG(3857)
driver = ogr.GetDriverByName("ESRI Shapefile")
dataSource = driver.Open(filename, 0)
ds = dataSource.GetLayer()
parcels = {}
headings = [colID]
iterations = 0
parcel_data = {}
for f in tqdm(ds):
id = int(f['id'])
geom = f.GetGeometryRef()
geom = geom.ExportToWkt()
vect_tmp_drv = ogr.GetDriverByName('MEMORY')
vect_tmp_src = vect_tmp_drv.CreateDataSource('')
vect_tmp_lyr = vect_tmp_src.CreateLayer('', srs, ogr.wkbPolygon)
vect_tmp_lyr.CreateField(ogr.FieldDefn("id", ogr.OFTInteger))
feat = ogr.Feature(vect_tmp_lyr.GetLayerDefn())
feat.SetField("id", id)
feat_geom = ogr.CreateGeometryFromWkt(geom)
feat.SetGeometry(feat_geom)
vect_tmp_lyr.CreateFeature(feat)
xmin, xmax, ymin, ymax = feat_geom.GetEnvelope()
off_ulx, off_uly = map(int, gdal.ApplyGeoTransform(inv_gt, xmin, ymax))
off_lrx, off_lry = map(int, gdal.ApplyGeoTransform(inv_gt, xmax, ymin))
rows, columns = (off_lry - off_uly) + 1, (off_lrx - off_ulx) + 1
ras_tmp = gdal.GetDriverByName('MEM').Create('', columns, rows, 1, gdal.GDT_Byte)
ras_tmp.SetProjection(ras.GetProjection())
ras_gt = list(gt)
ras_gt[0], ras_gt[3] = gdal.ApplyGeoTransform(gt, off_ulx, off_uly)
ras_tmp.SetGeoTransform(ras_gt)
gdal.RasterizeLayer(ras_tmp, [1], vect_tmp_lyr, burn_values=[1])
mask = ras_tmp.GetRasterBand(1).ReadAsArray()
aa = off_uly
bb = off_lry + 1
cc = off_ulx
dd = off_lrx + 1
iterations += 1
if samples is not None and samples == iterations:
break
parcels[f[colID]] = {}
values = [f[colID]]
if data is None or geom is None:
continue
for band in bands:
for i in range(data[band].shape[0]):
if iterations == 1:
headings.append(band+'_mean_'+str(data.time[i].values).split('.')[0][:10])
headings.append(band+'_std_'+str(data.time[i].values).split('.')[0][:10])
values.append(round(np.nanmean(data[band][i].values[aa:bb,cc:dd]),3))
values.append(round(np.nanstd(data[band][i].values[aa:bb,cc:dd]),3))
if len(values)==1:
continue
if iterations == 1:
headings.append('CropType')
values.append(f[colType])
parcels[f[colID]] = values
df = pd.DataFrame.from_dict(parcels, orient='index')
df.to_csv(outfile,header=headings,index = False)
def generate_feature_space_coherence(filename,outfile,colID,colType,timeStart,timeEnd,samples=None,classficationType='object'):
'''
Generates a pixel-based or object-based feature space in the format of csv
:param filename: the full path of the shapefile
:param outfile: the full path of the feature space csv to be generated
:param colID: the column name that holds the id for each parcel
:param colType: the column name that holds the crop code for each parcel
:param timeStart: starting date of acquisitions in the YYYY-MM-DD format
:param timeEnd: ending date of acquisitions in the YYYY-MM-DD format
:param classficationType: whether the classification is pixel-based or object-based. Valid values 'o' or 'p'
'''
from osgeo import osr,ogr
if filename is None:
sys.exit('No filename has been given')
if colID is None:
sys.exit('No column for ID has been given')
if colType is None:
sys.exit('No column for crop type has been given')
if not os.path.exists(filename):
sys.exit('File does not exist')
query = {
'time': (timeStart, timeEnd),
'product': 'sentinel1_coherence',
}
dc = datacube.Datacube(app="test", config="/home/noa/datacube.conf")
bands = ['vv','vh','vv_vh']
print("Loading data for range {}...please wait approximately 3-7 minutes".format(timeStart+' to '+timeEnd))
data = dc.load(**query)
print("Data has been loaded")
if(len(data)==0):
return
data['vv_vh'] = data.vv / data.vh
ws = outfile
outfile = outfile + 'coherence_' + str(timeStart) + '_to_' + str(timeEnd) + '.csv'
if True:
for rasterfile in os.listdir(os.path.join(ws,'s1',timeStart[:4],'coherence')):
if '.tif' in rasterfile:
basemap = rasterfile
break
if basemap is None:
return
ras = gdal.Open(os.path.join(ws,'s1',timeStart[:4],'coherence',basemap))
gt = ras.GetGeoTransform()
inv_gt = gdal.InvGeoTransform(gt)
srs = osr.SpatialReference()
srs.ImportFromEPSG(3857)
driver = ogr.GetDriverByName("ESRI Shapefile")
dataSource = driver.Open(filename, 0)
ds = dataSource.GetLayer()
parcels = {}
headings = [colID]
iterations = 0
parcel_data = {}
for f in tqdm(ds):
id = int(f['id'])
geom = f.GetGeometryRef()
geom = geom.ExportToWkt()
vect_tmp_drv = ogr.GetDriverByName('MEMORY')
vect_tmp_src = vect_tmp_drv.CreateDataSource('')
vect_tmp_lyr = vect_tmp_src.CreateLayer('', srs, ogr.wkbPolygon)
vect_tmp_lyr.CreateField(ogr.FieldDefn("id", ogr.OFTInteger))
feat = ogr.Feature(vect_tmp_lyr.GetLayerDefn())
feat.SetField("id", id)
feat_geom = ogr.CreateGeometryFromWkt(geom)
feat.SetGeometry(feat_geom)
vect_tmp_lyr.CreateFeature(feat)
xmin, xmax, ymin, ymax = feat_geom.GetEnvelope()
off_ulx, off_uly = map(int, gdal.ApplyGeoTransform(inv_gt, xmin, ymax))
off_lrx, off_lry = map(int, gdal.ApplyGeoTransform(inv_gt, xmax, ymin))
rows, columns = (off_lry - off_uly) + 1, (off_lrx - off_ulx) + 1
ras_tmp = gdal.GetDriverByName('MEM').Create('', columns, rows, 1, gdal.GDT_Byte)
ras_tmp.SetProjection(ras.GetProjection())
ras_gt = list(gt)
ras_gt[0], ras_gt[3] = gdal.ApplyGeoTransform(gt, off_ulx, off_uly)
ras_tmp.SetGeoTransform(ras_gt)
gdal.RasterizeLayer(ras_tmp, [1], vect_tmp_lyr, burn_values=[1])
mask = ras_tmp.GetRasterBand(1).ReadAsArray()
aa = off_uly
bb = off_lry + 1
cc = off_ulx
dd = off_lrx + 1
iterations += 1
if samples is not None and samples == iterations:
break
parcels[f[colID]] = {}
values = [f[colID]]
if data is None or geom is None:
continue
for band in bands:
for i in range(data[band].shape[0]):
if iterations == 1:
headings.append('coherence_'+band+'_mean_'+str(data.time[i].values).split('.')[0][:10])
headings.append('coherence_'+band+'_std_'+str(data.time[i].values).split('.')[0][:10])
values.append(round(np.nanmean(data[band][i].values[aa:bb,cc:dd]),3))
values.append(round(np.nanstd(data[band][i].values[aa:bb,cc:dd]),3))
if len(values)==1:
continue
if iterations == 1:
headings.append('CropType')
values.append(f[colType])
parcels[f[colID]] = values
df = pd.DataFrame.from_dict(parcels, orient='index')
df.to_csv(outfile,header=headings,index = False)
def generate_mask(outfile_dir, filename, colID = 'id', timeStart = '2017-03-01', timeEnd = '2017-10-30', method = 'object', sentinel2=True):
bands_indices = [0,1,2,3,4,5,6,7,8,9,11,12,13]
if filename is None:
sys.exit('No filename has been given')
if colID is None:
sys.exit('No column for ID has been given')
# if colType is None:
# sys.exit('No column for crop type has been given')
if not os.path.exists(filename):
sys.exit('File does not exist')
dates = get_dates(timeStart, timeEnd)
parcels = {}
iterations = 0
headings = []
for day in dates:
stime = time()
query = {
'time': (day[0], day[1]),
'product': 's2_preprocessed_v2',
}
dc = datacube.Datacube(app="test", config="/home/noa/datacube.conf")
#print("Loading data for range {}...Patience is a great asset!".format(day[0]+' to '+day[1]))
data = dc.load(measurements=['SCL'], **query)
outfile = outfile_dir + 'mask' + str(data.time.values[0])[:10] + '.csv'
data = data.to_array()
data.loc['SCL'] = (data.loc['SCL']>=3) & (data.loc['SCL']<7)
cloud_free_ratio = data[0].values.sum() / (data.shape[2]*data.shape[3])
if cloud_free_ratio < 0.05:
print("Cloud coverage for {} is {}%, thus this date is skipped.".format(str(pd.to_datetime(data.time[0].values).date()), 100-cloud_free_ratio*100))
continue
headings.append(str(pd.to_datetime(data.time[0].values).date()))
if sentinel2:
ras = gdal.Open('/data2/netherlands/s2/2017/31UFT_clipped/03/27/March272017_B02.tif')
gt = ras.GetGeoTransform()
inv_gt = gdal.InvGeoTransform(gt)
srs = osr.SpatialReference()
srs.ImportFromEPSG(3857)
driver = ogr.GetDriverByName("ESRI Shapefile")
dataSource = driver.Open(filename, 0)
ds = dataSource.GetLayer()
for f in tqdm(ds):
geom = f.GetGeometryRef()
geom = geom.ExportToWkt()
vect_tmp_drv = ogr.GetDriverByName('MEMORY')
vect_tmp_src = vect_tmp_drv.CreateDataSource('')
vect_tmp_lyr = vect_tmp_src.CreateLayer('', srs, ogr.wkbPolygon)
vect_tmp_lyr.CreateField(ogr.FieldDefn("id", ogr.OFTInteger))
feat = ogr.Feature(vect_tmp_lyr.GetLayerDefn())
feat.SetField("id", f['id'])
feat_geom = ogr.CreateGeometryFromWkt(geom)
feat.SetGeometry(feat_geom)
vect_tmp_lyr.CreateFeature(feat)
xmin, xmax, ymin, ymax = feat_geom.GetEnvelope()
off_ulx, off_uly = map(int, gdal.ApplyGeoTransform(inv_gt, xmin, ymax))
off_lrx, off_lry = map(int, gdal.ApplyGeoTransform(inv_gt, xmax, ymin))
# Specify offset and rows and columns to read
rows, columns = (off_lry - off_uly) + 1, (off_lrx - off_ulx) + 1
aa = off_uly
bb = off_lry + 1
cc = off_ulx
dd = off_lrx + 1
if iterations == 0:
parcels[f[colID]] = []
if data is None or geom is None:
continue
ras_tmp = gdal.GetDriverByName('MEM').Create('', columns, rows, 1, gdal.GDT_Byte)
ras_tmp.SetProjection(ras.GetProjection())
ras_gt = list(gt)
ras_gt[0], ras_gt[3] = gdal.ApplyGeoTransform(gt, off_ulx, off_uly)
ras_tmp.SetGeoTransform(ras_gt)
gdal.RasterizeLayer(ras_tmp, [1], vect_tmp_lyr, burn_values=[1])
mask = ras_tmp.GetRasterBand(1).ReadAsArray()
parcel_data = data[:,0,aa:bb,cc:dd].where(mask)
parcel_size = mask.sum()
cloudfree_parcel_size = (parcel_data[0].values == 1).sum()
cloud_free_ratio = cloudfree_parcel_size / (parcel_size + 1e-7)
parcels[f[colID]].append(cloud_free_ratio)
iterations += 1
df = pd.DataFrame.from_dict(parcels, orient='index')
df.to_csv(outfile_dir,header=headings,index=False)
return df
def geometry_mask(geoms, geobox, all_touched=False, invert=False):
return rasterio.features.geometry_mask([geom.to_crs(geobox.crs) for geom in geoms],
out_shape=geobox.shape,
transform=geobox.affine,
all_touched=all_touched,
invert=invert)
def display_rgb(img,alpha=1., figsize=(10, 10)):
# rgb = np.stack([img[b_r], img[b_g], img[b_b]], axis=-1)
rgb = img/img.max() * alpha
plt.figure(figsize=figsize)
plt.imshow(rgb)
def read_shapefile(shape_file, exhaustive = False, selected_ids = None, threshold=1):
ds = fiona.open(shape_file)
crs = geometry.CRS(ds.crs_wkt)
cnt = 0
geometries = []
ids = []
for f in ds:
feature_id = f['properties']['id']
feature_geom = f['geometry']
geom = Geometry(feature_geom, crs)
# bounds = shape(feature_geom).bounds
# if 'MULTIPOLYGON' in geom.wkt:
# continue
if selected_ids is None:
geometries.append(geom)
ids.append(feature_id)
cnt += 1
if exhaustive:
continue
elif cnt == threshold:
return geometries, ids
elif feature_id in selected_ids:
geometries.append(geom)
ids.append(feature_id)
selected_ids.remove(feature_id)
if len(selected_ids) == 0:
return geometries, ids
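# Example of pulling a handful of parcel geometries with read_shapefile; the shapefile path is a
# placeholder and the returned objects are datacube Geometry instances in the shapefile's CRS.
def _example_read_parcels():
    geometries, ids = read_shapefile("/path/to/parcels.shp", threshold=5)
    return dict(zip(ids, geometries))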
def read_shapefile_simple(shape_file, exhaustive = False, selected_ids = None, threshold=1):
ds = fiona.open(shape_file)
crs = geometry.CRS(ds.crs_wkt)
cnt = 0
geometries = []
ids = []
for f in ds:
feature_id = f['properties']['id']
geom = f['geometry']
# bounds = shape(feature_geom).bounds
# if 'MULTIPOLYGON' in geom.wkt:
# continue
if selected_ids is None:
geometries.append(geom)
ids.append(feature_id)
cnt += 1
if exhaustive:
continue
elif cnt == threshold:
return geometries, ids
elif feature_id in selected_ids:
geometries.append(geom)
ids.append(feature_id)
selected_ids.remove(feature_id)
if len(selected_ids) == 0:
return geometries, ids
def check_index(index):
if index.lower() not in ['ndvi', 'ndwi', 'ndmi', 'psri']:
print("Error in name of index. Calculation for '{}' is not supported.".format(index))
return False
return True
def calculate_index(data, index):
if index.lower() == 'ndvi':
return (data.B08.astype('float16')-data.B04.astype('float16'))/(data.B08.astype('float16')+data.B04.astype('float16'))
elif index.lower() == 'ndwi':
return (data.B08.astype('float16')-data.B03.astype('float16'))/(data.B08.astype('float16')+data.B03.astype('float16'))
elif index.lower() == 'ndmi':
return (data.B08.astype('float16')-data.B11.astype('float16'))/(data.B08.astype('float16')+data.B11.astype('float16'))
elif index.lower() == 'psri':
return (data.B04.astype('float16')-data.B02.astype('float16'))/data.B06.astype('float16')
else:
return None
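# Self-contained check of calculate_index on synthetic reflectances. The band names mirror the
# datacube loads used elsewhere in this module; the numbers are invented and only illustrate the
# formulas, e.g. NDVI = (B08 - B04) / (B08 + B04) and NDMI = (B08 - B11) / (B08 + B11).
def _example_calculate_index():
    import xarray as xr
    data = xr.Dataset(
        {
            "B02": ("x", np.array([0.05, 0.06], dtype="float32")),
            "B03": ("x", np.array([0.08, 0.07], dtype="float32")),
            "B04": ("x", np.array([0.10, 0.09], dtype="float32")),
            "B06": ("x", np.array([0.30, 0.25], dtype="float32")),
            "B08": ("x", np.array([0.40, 0.35], dtype="float32")),
            "B11": ("x", np.array([0.20, 0.22], dtype="float32")),
        }
    )
    ndvi = calculate_index(data, "ndvi")
    ndmi = calculate_index(data, "ndmi")
    return ndvi.values, ndmi.values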
def round5(x):
return (round((x-5)/10))*10+5
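# round5 snaps a projected coordinate to the nearest 10 m pixel centre ending in 5, e.g.
# round5(642103.2) -> 642105 and round5(642097.3) -> 642095 (illustrative coordinates).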
def plot_coherence_timeseries(geom, index, start_time='2017-01-01', end_time='2017-12-31', cols=4,
masked=False, buffer=300, show = True):
dc = datacube.Datacube(app="test", config="/home/noa/datacube.conf")
bounds = [(round5(x), round5(y)) for x, y in geom.exterior.coords]
if not masked:
geom = geom.buffer(buffer)
query = {
'geopolygon': geom,
'time': (start_time, end_time),
'product': "sentinel1_coherence"
}
data = dc.load(measurements=[index], **query)
mask = geometry_mask([geom], data.geobox, invert=True)
bounds_xy = [(np.where(data.x == x)[0][0], np.where(data.y == y)[0][0]) for x, y in bounds]
x = [point[0] for point in bounds_xy]
y = [point[1] for point in bounds_xy]
all_pixels = len(data.x) * len(data.y) # np.count_nonzero(mask)
rows = len(data.time.values)//cols
if len(data.time.values) % cols > 0:
rows += 1
if masked:
data = data.where(mask)
fig = plt.figure(figsize=(20, 30))
for i in range(len(data.time.values)):
img = data[index][i].values
fig.add_subplot(rows, cols, i+1)
fig.subplots_adjust(hspace=0.2)
im = plt.imshow(img, vmin=0, vmax=1, cmap='binary')
plt.title(index + ' for ' + str(data.time.values[i])[:10], size=10)
plt.plot(x, y, color='r')
fig.subplots_adjust(right=0.9)
cbar_ax = fig.add_axes([0.95, 0.15, 0.02, 0.7])
fig.colorbar(im, cax=cbar_ax)
if show:
plt.show()
else:
return fig
def plot_dindex_timeseries(geom, index='ndvi', start_time='2019-01-01',
cols=5, end_time='2019-12-31',
cloud_free_percentage=60, masked=False,
buffer=300, show=True):
dc = datacube.Datacube(app="test", config="/home/noa/datacube.conf")
bounds = [(round5(x), round5(y)) for x, y in geom.exterior.coords]
if not masked and buffer is None:
buffer = 300
geom2 = geom.buffer(buffer)
elif not buffer is None and not masked:
geom2 = geom.buffer(buffer)
else:
geom2 = geom
query = {
'geopolygon': geom2,
'time': (start_time, end_time),
'product': "s2_preprocessed_v2"
}
if not check_index(index):
return None
if index.lower() == 'ndvi':
data = dc.load(measurements=['B04', 'B08', 'SCL'], **query)
colormap = 'RdYlGn'
elif index.lower() == 'ndwi':
data = dc.load(measurements=['B03', 'B08', 'SCL'], **query)
colormap = 'YlGnBu'
elif index.lower() == 'ndmi':
data = dc.load(measurements=['B08', 'B11', 'SCL'], **query)
colormap = 'YlGnBu'
elif index.lower() == 'psri':
data = dc.load(measurements=['B02', 'B04', 'B06', 'SCL'], **query)
colormap = 'YlOrRd'
mask = geometry_mask([geom], data.geobox, invert=True)
all_pixels = np.count_nonzero(mask) # len(data.x) * len(data.y)
nan_array = data['SCL'].where((data['SCL'] >= 3) & (data['SCL'] < 7) & mask)
timestamps = data.time.values
if masked:
data = data.where(mask)
data[index] = calculate_index(data, index)
cloud_percs = []
for i in range(len(timestamps)):
free_pixels = nan_array[i].count().values
cloud_perc = (free_pixels / all_pixels) * 100
cloud_percs.append(cloud_perc)
bounds_xy = [(np.where(data.x == x)[0][0], np.where(data.y == y)[0][0]) for x, y in bounds]
x = [point[0] for point in bounds_xy]
y = [point[1] for point in bounds_xy]
free_index = [i for i in range(len(cloud_percs)) if cloud_percs[i] > cloud_free_percentage]
timestamps = timestamps[free_index]
ndvis = data[index][free_index]
cloud_percs = np.array(cloud_percs)[free_index]
rows = len(timestamps) // cols + 1
fig = plt.figure(figsize=(cols * 4, rows * 4))
fig.tight_layout()
for i in range(1,len(timestamps)):
img = ndvis[i].values.astype('float64')
img2 = ndvis[i-1].values.astype('float64')
img = abs(img - img2)
fig.add_subplot(rows, cols, i + 1)
im = plt.imshow(img, vmin=0, vmax=2, cmap=colormap)
plt.title('d{} for {} and {}'.format(index.upper(), pd.to_datetime(timestamps[i]).date(), pd.to_datetime(timestamps[i-1]).date()), size=10)
plt.plot(x, y, color='r')
plt.axis('off')
fig.subplots_adjust(right=0.9)
cbar_ax = fig.add_axes([0.95, 0.15, 0.02, 0.7])
fig.colorbar(im, cax=cbar_ax)
if show:
plt.show()
else:
return fig
def plot_index_timeseries(geom, index='ndvi', start_time='2019-01-01',
cols=5, end_time='2019-12-31',
cloud_free_percentage=60, masked=False,
buffer=300, show = True):
dc = datacube.Datacube(app="test", config="/home/noa/datacube.conf")
bounds = [(round5(x), round5(y)) for x, y in geom.exterior.coords]
if not masked and buffer is None:
buffer = 300
geom2 = geom.buffer(buffer)
elif not buffer is None and not masked:
geom2 = geom.buffer(buffer)
else:
geom2 = geom
query = {
'geopolygon': geom2,
'time': (start_time, end_time),
'product': "s2_preprocessed_v2"
}
if not check_index(index):
return None
if index.lower() == 'ndvi':
data = dc.load(measurements=['B04', 'B08', 'SCL'], **query)
colormap = 'RdYlGn'
elif index.lower() == 'ndwi':
data = dc.load(measurements=['B03', 'B08', 'SCL'], **query)
colormap = 'YlGnBu'
elif index.lower() == 'ndmi':
data = dc.load(measurements=['B08', 'B11', 'SCL'], **query)
colormap = 'YlGnBu'
elif index.lower() == 'psri':
data = dc.load(measurements=['B02', 'B04', 'B06', 'SCL'], **query)
colormap = 'YlOrRd'
mask = geometry_mask([geom], data.geobox, invert=True)
all_pixels = np.count_nonzero(mask) #len(data.x) * len(data.y)
nan_array = data['SCL'].where((data['SCL'] >= 3) & (data['SCL'] < 7) & mask)
timestamps = data.time.values
if masked:
data = data.where(mask)
data[index] = calculate_index(data, index)
cloud_percs = []
for i in range(len(timestamps)):
free_pixels = nan_array[i].count().values
cloud_perc = (free_pixels / all_pixels) * 100
cloud_percs.append(cloud_perc)
bounds_xy = [(np.where(data.x == x)[0][0], np.where(data.y == y)[0][0]) for x, y in bounds]
x = [point[0] for point in bounds_xy]
y = [point[1] for point in bounds_xy]
free_index = [i for i in range(len(cloud_percs)) if cloud_percs[i] > cloud_free_percentage]
timestamps = timestamps[free_index]
ndvis = data[index][free_index]
cloud_percs = np.array(cloud_percs)[free_index]
rows = len(timestamps) // cols + 1
fig = plt.figure(figsize=(cols * 4, rows * 4))
fig.tight_layout()
for i in range(len(timestamps)):
img = ndvis[i].values.astype('float64')
fig.add_subplot(rows, cols, i + 1)
im = plt.imshow(img, vmin=-0.5, vmax=1, cmap=colormap)
plt.title('{} for {}'.format(index.upper(), pd.to_datetime(timestamps[i]).date()), size=10)
plt.plot(x, y, color='r')
plt.axis('off')
fig.subplots_adjust(right=0.9)
cbar_ax = fig.add_axes([0.95, 0.15, 0.02, 0.7])
fig.colorbar(im, cax=cbar_ax)
if show:
plt.show()
else:
return fig
def create_rgb(geom,start_time,end_time, buffer = None):
dc = datacube.Datacube(app="test", config="/home/noa/datacube.conf")
bounds = [(round5(x), round5(y)) for x, y in geom.exterior.coords]
if not buffer is None:
geom2 = geom.buffer(buffer)
else:
geom2 = geom
query = {
'geopolygon': geom2,
'time': (start_time, end_time),
'product': "s2_preprocessed_v2"
}
data = dc.load(measurements=['B02', 'B03', 'B04', 'SCL'],**query)
mask = geometry_mask([geom], data.geobox, invert=True)
all_pixels = np.count_nonzero(mask)
nan_array = data['SCL'].where((data['SCL']>=3) & (data['SCL']<7) & mask)
timestamps = data.time.values
cloud_percs = []
for i in range(len(timestamps)):
free_pixels = nan_array[i].count().values
cloud_perc = (free_pixels / all_pixels) * 100
cloud_percs.append(cloud_perc)
bounds_xy = [(np.where(data.x == x)[0][0], np.where(data.y == y)[0][0]) for x, y in bounds]
x = [point[0] for point in bounds_xy]
y = [point[1] for point in bounds_xy]
rgb = np.stack((data['B02'].values, data['B03'].values, data['B04'].values), axis=2)
rgb = rgb.swapaxes(0, 3).swapaxes(0, 1).astype('float32')
return rgb,timestamps, x, y, cloud_percs
def plot_rgb(rgb,timestamps, x, y, cloud_percs, cloud_free_percentage = 100, cols=6):
free_index = [i for i in range(len(cloud_percs)) if cloud_percs[i] > cloud_free_percentage]
rgb = rgb[:,:,:,free_index]
timestamps = timestamps[free_index]
rows = len(timestamps) // cols + 1
fig = plt.figure(figsize=(cols*4,rows*4))
fig.tight_layout()
for i in range(len(timestamps)):
img = rgb[:, :, :, i]
array_min, array_max = np.nanmin(img), np.nanmax(img)
img = (img - array_min) / (array_max - array_min + 1e-7)
fig.add_subplot(rows, cols, i + 1)
plt.imshow(img)
plt.title('RGB for {}'.format(pd.to_datetime(timestamps[i]).date()))
#!/usr/bin/env python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
import logging
import logging.config
import os
import netCDF4 as nc
import scipy.interpolate
from smos_tools.data_types.os_udp_datatype import datatype, science_flags_dtype, control_flags_dtype
from smos_tools.logger.logging_config import logging_config
def read_os_udp(filename):
"""
Read the Ocean Salinity User Data Product file.
:param filename: path to .DBL file
:return: numpy structured array
"""
# check the files are udp files
if os.path.basename(filename)[14:17] != 'UDP':
raise ValueError('{} is not a UDP file'.format(filename))
try:
file = open(filename, 'rb')
except IOError:
logging.exception('file {} does not exist'.format(filename))
raise
logging.debug('Reading file...')
# Read first unsigned int32, containing number of grid points to iterate over
n_grid_points = np.fromfile(file, dtype=np.uint32, count=1)[0]
data = np.fromfile(file, dtype=np.dtype(datatype), count=n_grid_points)
file.close()
logging.debug('Done.')
return data
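# Usage sketch for read_os_udp. The product path is a placeholder; note the function checks that
# characters 14:17 of the basename spell 'UDP', as in standard OSUDP2 file names.
def _example_read_udp():
    data = read_os_udp("/path/to/SM_OPER_MIR_OSUDP2_xxx.DBL")
    print(data.shape[0], "grid points read")
    return data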
def unpack_control_flags(control_flag_data):
"""
Unpacks the control flags into a numpy structured array so they can be made into a dataframe later
:param control_flag_data: A control flag part of the data from read_os_udp
:return: a numpy structured array
"""
# Make the empty array with the right dtype for the data
unpacked_flags = np.empty((len(control_flag_data)), dtype=control_flags_dtype)
# unpack from Least Significant Bit
for position in range(0, len(control_flags_dtype)):
unpacked_flags[control_flags_dtype[position][0]] = (control_flag_data >> position) & 1
return unpacked_flags
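# Plain-integer illustration of the unpacking above: each named flag is one bit of the packed
# word, read from the least significant bit upwards, so 0b0101 sets the flags at positions 0 and 2.
def _example_bit_unpack(flag_word=0b0101, n_bits=4):
    return [(flag_word >> position) & 1 for position in range(n_bits)]  # -> [1, 0, 1, 0]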
def unpack_science_flags(science_flag_data):
"""
Unpacks the control flags into a numpy structured array so they can be made into a dataframe later
:param science_flag_data: A science flag part of the data from read_os_udp
:return: a numpy structured array
"""
# Make the empty array with the right dtype for the data
unpacked_flags = np.empty((len(science_flag_data)), dtype=science_flags_dtype)
# unpack from Least Significant Bit
for position in range(0, len(science_flags_dtype)):
unpacked_flags[science_flags_dtype[position][0]] = (science_flag_data >> position) & 1
return unpacked_flags
def extract_field(data, fieldname='SSS1'):
"""
Converts the structured array into a pandas small dataframe.
:param data: numpy structured array (record array).
:param fieldname: string (a field name from variable dtypes).
:return: pandas dataframe (columns are Mean_acq_time, Latitude, Longitude and fieldname)
Mean_acq_time is expressed in UTC decimal days (MJD2000 reference).
"""
# NOTE there is a difference between how OS and SM handle mean acquisition time.
# For OS this is a float expressed in UTC decimal days (in MJD2000) reference,
# while for SM this has already been split into Days, seconds and microseconds
time_frame = pd.DataFrame(data['Geophysical_Parameters_Data']['Mean_acq_time'], columns=['Mean_acq_time'])
gridpoint_id_frame = pd.DataFrame(data['Grid_Point_Data']['Grid_Point_ID'], columns=['Grid_Point_ID'])
lat_frame = pd.DataFrame(data['Grid_Point_Data']['Latitude'], columns=['Latitude'])
lon_frame = pd.DataFrame(data['Grid_Point_Data']['Longitude'], columns=['Longitude'])
# Handle which sub-dictionary the field might be in
geophys = [elem[0] for elem in datatype[1][1]]
confidence = [elem[0] for elem in datatype[6][1]]
if fieldname in geophys:
dict_part = 'Geophysical_Parameters_Data'
elif fieldname in confidence:
dict_part = 'Product_Confidence_Descriptors'
else:
logging.error("ERROR: Couldn't find fieldname '{}' in "
"'Geophysical_Parameters_Data' or 'Product_Confidence_Descriptors'".format(fieldname))
raise KeyError("{} not in Geophysical_Parameters_Data or Product_Confidence_Descriptors".format(fieldname))
if fieldname in ['Dg_chi2_1', 'Dg_chi2_2']:
field_frame = pd.DataFrame(data[dict_part][fieldname]/100., columns=[fieldname])
else:
field_frame = pd.DataFrame(data[dict_part][fieldname], columns=[fieldname])
dataframe = pd.concat([time_frame,
gridpoint_id_frame, lat_frame, lon_frame, field_frame], axis=1)
dataframe = dataframe.replace(-999, np.NaN)
dataframe.dropna(axis=0, inplace=True)
return dataframe
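# Sketch of pulling two fields out of a single UDP read (the path is a placeholder). extract_field
# drops -999 fill values, so the two frames may differ in length before the merge.
def _example_extract_fields(udp_file="/path/to/SM_OPER_MIR_OSUDP2_xxx.DBL"):
    data = read_os_udp(udp_file)
    sss1 = extract_field(data, fieldname="SSS1")
    chi2 = extract_field(data, fieldname="Dg_chi2_1")
    return sss1.merge(chi2, on=["Mean_acq_time", "Grid_Point_ID", "Latitude", "Longitude"])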
def interpolate_udp_field(data, field='SSS1', latmin=-90, latmax=90, lonmin=-180, lonmax=180, delta=0.25, dist_threshold=0.25):
"""
Interpolates a given geophysical field of the udp file over a regular grid, according to nearest neighbour.
:param data: structured array from udp file.
:param field: one field name among the geophysical parameters of the udp structured array
:param latmin: minimum latitude of the regular grid
:param latmax: maximum latitude of the regular grid
:param lonmin: minimum longitude of the regular grid
:param lonmax: maximum longitude of the regular grid
:param delta: distance in degrees between two points on the regular grid
:param dist_threshold: maximum distance accepted for nearest neighbour interpolation
:return: numpy arrays: lats, lons, field value
"""
lats = np.arange(latmin, latmax, delta)
lons = np.arange(lonmin, lonmax, delta)
data_out = np.empty((lats.size, lons.size))
data_out[:] = np.nan
# dist_threshold (in degrees) bounds the nearest-neighbour match; use the value passed in
for index_value, value in enumerate(data['Geophysical_Parameters_Data'][field]):
if value != -999.:
i_lat = np.argmin(np.abs(lats - data['Grid_Point_Data']['Latitude'][index_value]))
i_lon = np.argmin(np.abs(lons - data['Grid_Point_Data']['Longitude'][index_value]))
if (np.min(np.abs(lats - data['Grid_Point_Data']['Latitude'][index_value])) < dist_threshold) & \
(np.min(np.abs(lons - data['Grid_Point_Data']['Longitude'][index_value])) < dist_threshold):
data_out[i_lat, i_lon] = value
return lats, lons, data_out
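# Sketch of gridding one UDP geophysical field onto a coarser regular grid. The 0.5-degree
# spacing and matching threshold are illustrative; the path is a placeholder.
def _example_grid_field(udp_file="/path/to/SM_OPER_MIR_OSUDP2_xxx.DBL"):
    data = read_os_udp(udp_file)
    lats, lons, grid = interpolate_udp_field(data, field="SSS1", delta=0.5, dist_threshold=0.5)
    # grid has shape (len(lats), len(lons)) and is NaN wherever no grid point fell close enough
    return lats, lons, grid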
def read_and_interpolate_isas(filename, latmin=-90, latmax=90, lonmin=-180, lonmax=180, delta=0.25, dist_threshold=0.25, return_df=False):
"""
Interpolates isas on a regular grid
:param filename: path/to/isas/file
:param latmin: minimum latitude of the regular grid
:param latmax: maximum latitude of the regular grid
:param lonmin: minimum longitude of the regular grid
:param lonmax: maximum longitude of the regular grid
:param delta: distance in degrees between two points on the regular grid
:param dist_threshold: maximum distance accepted for nearest neighbour interpolation
:param return_df: if False, returns numpy array; if true, returns a pandas dataframe
:return: data frame with lat, lon, field value
"""
lats = np.arange(latmin, latmax, delta)
lons = np.arange(lonmin, lonmax, delta)
mlons, mlats = np.meshgrid(lons, lats)
dataset = nc.Dataset(filename)
isas_lat = dataset.variables['latitude'][:]
isas_lon = dataset.variables['longitude'][:]
isas_sss = dataset.variables['PSAL'][0, 0, :, :]
isas_pcv = dataset.variables['PSAL_PCTVAR'][0, 0, :, :]
dataset.close()
isas_mlon, isas_mlat = np.meshgrid(isas_lon, isas_lat)
isas_interp = scipy.interpolate.griddata(
(isas_mlon.flatten(), isas_mlat.flatten()),
isas_sss.flatten(),
(mlons, mlats),
method='nearest',
)
pcv_interp = scipy.interpolate.griddata(
(isas_mlon.flatten(), isas_mlat.flatten()),
isas_pcv.flatten(),
(mlons, mlats),
method='nearest',
)
if return_df:
lat_frame = pd.DataFrame(mlats.flatten(), columns=['Latitude'])
lon_frame = pd.DataFrame(mlons.flatten(), columns=['Longitude'])
pcv_frame = pd.DataFrame(pcv_interp.flatten(), columns=['PSAL_PCTVAR'])
field_frame = pd.DataFrame(isas_interp.flatten(), columns=['PSAL'])
dataframe = pd.concat([lat_frame, lon_frame, pcv_frame, field_frame], axis=1)
print(dataframe)
return dataframe
else:
return isas_interp
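# Hedged usage sketch (illustration only): with return_df=True the function
# returns a dataframe holding Latitude, Longitude, PSAL_PCTVAR and PSAL on the
# regular grid; the path below is a placeholder.
# isas_df = read_and_interpolate_isas('path/to/isas_file.nc', delta=0.25, return_df=True)
# print(isas_df[['Latitude', 'Longitude', 'PSAL']].head())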
def plot_os_bias(udp_filename, isas_filename, field='SSS1'):
"""
Plots salinity bias against isas salinity.
:param udp_filename: path/to/os/udp/file
    :param isas_filename: path/to/isas/file
    :param field: fieldname of the UDP salinity field to use (default 'SSS1')
    :return: a plot
"""
data_udp = read_os_udp(udp_filename)
lats, lons, udp_interp = interpolate_udp_field(data_udp, field=field)
isas_interp = read_and_interpolate_isas(isas_filename)
bias = udp_interp - isas_interp
mlons, mlats = np.meshgrid(lons, lats)
valid_index = (udp_interp > 0)
mlons_valid = mlons[valid_index]
fig1 = plt.figure()
centre_lon = mlons_valid.mean()
lat_0 = 5.
lon_0 = centre_lon
width = 110574 * 70 # ~100km * 70 deg
    height = 10 ** 5 * 170  # ~100 km * 170 deg
dot_size = 1
m = Basemap(
projection='poly',
lat_0=lat_0,
lon_0=lon_0,
width=width,
height=height,
resolution='l')
m.drawcoastlines(linewidth=0.5)
m.fillcontinents()
# labels [left, right, top, bottom]
m.drawparallels(np.arange(-80., 80., 20.), labels=[True, False, False, False], fontsize=8)
m.drawmeridians(np.arange(-180, 180, 20.), labels=[False, False, False, True], fontsize=8, rotation=45)
m.drawmapboundary()
plt.title('SSS bias')
cmap = 'bwr'
vmin = -1.
vmax = +1.
m.scatter(mlons,
mlats,
latlon=True,
c=bias,
s=dot_size,
zorder=10,
cmap=cmap,
              vmin=vmin,
              vmax=vmax,
)
cbar = m.colorbar()
cbar.set_label('[pss]')
plt.show()
    print('Mean absolute SSS bias [pss]:', np.mean(np.abs(bias[udp_interp > 0])))
def setup_os_plot(lat, long):
"""
Sets up the orbit plot for ocean salinity
:param lat: a list of latitudes
:param long: a list of longitudes
:return: figure object, basemap object, dot_size
"""
fig1 = plt.figure()
centre_lon = long.mean()
centre_lat = lat.mean()
# find a min and max lat and long
    # +-4 degree margin taken from the soil moisture plotting function
min_lon = max(long.min() - 4, -180.)
max_lon = min(long.max() + 4, +180.)
min_lat = max(lat.min() - 4, -90.)
max_lat = min(lat.max() + 4, +90.)
delta_lon = np.abs(max_lon - min_lon)
delta_lat = np.abs(max_lat - min_lat)
if delta_lat > 45: # for full orbit
        # for soil moisture a lat_0 of 10. is used
lat_0 = 5.
lon_0 = centre_lon
width = 110574 * 70 # ~100km * 70 deg
# height = 140 * 10**5 # 100km * 140 deg
        height = 10 ** 5 * 170  # ~100 km * 170 deg
dot_size = 1
else:
lat_0 = centre_lat
lon_0 = centre_lon
width = delta_lon * 110574
height = delta_lat * 10 ** 5
dot_size = 5
m = Basemap(
projection='poly',
lat_0=lat_0,
lon_0=lon_0,
width=width,
height=height,
resolution='l')
m.drawcoastlines(linewidth=0.5)
m.fillcontinents()
# labels [left, right, top, bottom]
m.drawparallels(np.arange(-80., 80., 20.), labels=[True, False, False, False], fontsize=8)
m.drawmeridians(np.arange(-180, 180, 20.), labels=[False, False, False, True], fontsize=8, rotation=45)
m.drawmapboundary()
return fig1, m, dot_size
def plot_os_orbit(os_df, fieldname='SSS1', vmin=None, vmax=None):
"""
Plot the ocean salinity UDP field fieldname.
    :param os_df: pandas dataframe containing ocean salinity data with index Days, Seconds, Microseconds, Grid_Point_ID
    :param fieldname: string fieldname of the data field to plot
    :param vmin: minimum value of the colour scale (a field-specific default is used if None)
    :param vmax: maximum value of the colour scale (a field-specific default is used if None)
:return:
"""
logging.debug('Plotting {} orbit...'.format(fieldname))
figure, m, dot_size = setup_os_plot(os_df['Latitude'].values, os_df['Longitude'].values)
if fieldname in ['SSS1', 'SSS2']:
plt.title(fieldname)
cmap = 'viridis'
c = os_df[fieldname] # geophysical variable to plot
        if vmin is None:
vmin = 32.
        if vmax is None:
vmax = 38.
m.scatter(os_df['Longitude'].values,
os_df['Latitude'].values,
latlon=True,
c=c,
s=dot_size,
zorder=10,
cmap=cmap,
vmin=vmin,
vmax=vmax,
)
cbar = m.colorbar()
cbar.set_label('[pss]')
elif fieldname == 'SSS3': # SSS anomaly
plt.title('SSS anomaly')
cmap = 'bwr'
c = os_df[fieldname] # geophysical variable to plot
        if vmin is None:
vmin = -0.5
        if vmax is None:
vmax = +0.5
m.scatter(os_df['Longitude'].values,
os_df['Latitude'].values,
latlon=True,
c=c,
s=dot_size,
zorder=10,
cmap=cmap,
vmin=vmin,
vmax=vmax,
)
cbar = m.colorbar()
elif fieldname in ['Dg_chi2_1', 'Dg_chi2_2']:
plt.title('Chi2')
cmap = 'jet'
c = os_df[fieldname]
        if vmin is None:
vmin = 1.0
        if vmax is None:
vmax = 1.3
m.scatter(os_df['Longitude'].values,
os_df['Latitude'].values,
latlon=True,
c=c,
s=dot_size,
zorder=10,
cmap=cmap,
vmin=vmin,
vmax=vmax,
)
cbar = m.colorbar()
else:
plt.title(fieldname)
cmap = 'viridis'
c = os_df[fieldname] # geophysical variable to plot
m.scatter(os_df['Longitude'].values,
os_df['Latitude'].values,
latlon=True,
c=c,
s=dot_size,
zorder=10,
cmap=cmap,
)
cbar = m.colorbar()
plt.show()
def plot_os_difference(os_df, fieldname='SSS1', vmin=-1, vmax=+1):
"""
Plot the ocean salinity UDP difference for fieldname.
    :param os_df: pandas dataframe containing ocean salinity data with index Days, Seconds, Microseconds, Grid_Point_ID
    :param fieldname: string fieldname of the data field to plot
    :param vmin: minimum value of the colour scale
    :param vmax: maximum value of the colour scale
:return:
"""
logging.debug('Plotting {} ...'.format(fieldname))
figure, m, dot_size = setup_os_plot(os_df['Latitude'].values, os_df['Longitude'].values)
plt.title(fieldname)
cmap = 'bwr'
c = os_df[fieldname] # geophysical variable to plot
m.scatter(os_df['Longitude'].values,
os_df['Latitude'].values,
latlon=True,
c=c,
s=dot_size,
zorder=10,
cmap=cmap,
vmin=vmin,
vmax=vmax)
cbar = m.colorbar()
plt.show()
def evaluate_field_diff(frame1, frame2, fieldname='SSS1', vmin=-1, vmax=+1, xaxis='Latitude'):
"""
Plot the difference between two dataframes for a given field. Gives map plots and scatter.
:param frame1: pandas dataframe containing the requested data field and index (Days, Seconds, Microseconds, Grid_Point_ID)
:param frame2: pandas dataframe containing the requested data field and index (Days, Seconds, Microseconds, Grid_Point_ID)
:param fieldname: String fieldname of the data field to compare
:param vmin: Minimum value visible on plot. Lower values saturate.
:param vmax: Maximum value visible on plot. Higher values saturate.
    :param xaxis: Variable against which the fieldname is plotted. One of: {'Latitude', 'Grid_Point_ID'}
:return:
"""
logging.debug('Evaluating difference between 2 dataframes for field {}...'.format(fieldname))
# Print record counts
logging.debug('Dataset 1 contains {} valid datarows'.format(len(frame1.index)))
logging.debug('Dataset 2 contains {} valid datarows'.format(len(frame2.index)))
# Get records in common
common = | pd.merge(frame1, frame2, how='inner', on=['Mean_acq_time', 'Grid_Point_ID']) | pandas.merge |
import urllib.request
from bs4 import BeautifulSoup
import pandas as pd
from ntpath import basename
import random
from datetime import datetime
from dateutil import relativedelta
import sqlite3
month_mapping = {'Jan':'01', 'Feb':'02', 'Mar':'03', 'Apr':'04', 'May':'05', 'Jun':'06',
'Jul':'07', 'Aug':'08', 'Sep':'09', 'Oct':'10', 'Nov':'11', 'Dec':'12', }
main_page = 'http://www.guide2research.com/topconf'
page = urllib.request.urlopen(main_page)
soup = BeautifulSoup(page, "lxml")
all_div_tags = soup.find_all('div', class_='grey myshad')[1:]
top_final = []
for ind, div_tag in enumerate(all_div_tags):
try:
hindex = int(div_tag.find_all('b')[0].text)
country = div_tag.find_all('b')[1].text
organizer = basename(div_tag.find_all('img')[0]['src']).split('_')[0]
temp = div_tag.find_all('div', style=lambda value: value and 'padding:0px' in value and 'margin:0px' in value)
common_temp = temp[0].text.split('-')
date = common_temp[0].replace(',','').strip().split(' ')
date = date[1] + '-' + month_mapping[date[0]] + '-' + date[-1]
location = common_temp[-1].strip().split(',')[0].strip()
website = temp[1].text.strip()
temp = div_tag.find_all('h4')[0].findChildren("a" , recursive=False)
name = temp[0].text.strip()
link = main_page + temp[0]['href']
d = datetime.strptime(date, "%d-%m-%Y")
deadline = str(d - relativedelta.relativedelta(months=4, days=random.randint(1,30))).split()[0].split('-')
deadline = deadline[-1] + '-' + deadline[1] + '-' + deadline[0]
top_final.append([date, link, name, organizer, location, country, website, deadline, hindex])
    except Exception:
        print('Skipping malformed conference entry at index', ind)
continue
df = pd.DataFrame(top_final, columns=['Date', 'Link', 'Name', 'Organizer', 'Location', 'Country', 'Website', 'Deadline', 'Hindex'])
df.to_csv('TopConf.csv', header=True, index=False)
dataframe = | pd.read_csv("TopConf.csv") | pandas.read_csv |
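# Hedged sketch (illustration only): sqlite3 is imported above but never used;
# the scraped table could plausibly be persisted like this. The database and
# table names below are placeholders.
# conn = sqlite3.connect('TopConf.db')
# df.to_sql('conferences', conn, if_exists='replace', index=False)
# conn.close()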
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is intended to provide a folium map object
for a station selection. Please do not use it directly;
instead import station and change the output format:
from icoscp.station import station
station.getIdList(.......outfmt='map')
"""
__author__ = ["<NAME>"]
__credits__ = "ICOS Carbon Portal"
__license__ = "GPL-3.0"
__version__ = "0.1.0"
__maintainer__ = "ICOS Carbon Portal, elaborated products team"
__email__ = ['<EMAIL>', '<EMAIL>']
__status__ = "rc1"
__date__ = "2021-09-20"
# Standard library imports.
import json
# Related third party imports.
import folium
import pandas as pd
from folium.plugins import MarkerCluster
import requests
def get(queried_stations, project):
"""Generates a folium map of stations.
Uses the requested stations dataframe along with the REST countries
API to generate an interactive folium map. Each marker in the
folium map represents a station of observations.
Parameters
----------
queried_stations : pandas.Dataframe
`queried_stations` dataframe includes each station's landing
page or (uri), id, name, country, lat, lon, elevation, project,
and theme.
project : str
The name of the project that the user inserted in the caller
function in order to search for stations.
Returns
-------
stations_map : folium.Map
`stations_map` is an interactive map used for visualizing
geospatial data.
"""
# Request countries data online.
response = request_rest_countries()
# Edit the requested data.
edited_response = collect_rest_data(response)
# Apply countries data on the queried stations and remove stations
# without a fixed location.
stations = edit_queried_stations(queried_stations, edited_response)
stations_map = folium.Map()
# Provide the map name within the top right menu.
if project == 'ALL':
cluster_name = ', '.join(['ICOS', 'NEON', 'INGOS', 'FLUXNET'])
else:
cluster_name = project
marker_cluster = MarkerCluster(name=cluster_name)
# Add tile layers to the folium map. Default is 'openstreetmap'.
add_tile_layers(stations_map)
# Use the stations at the most southwest and northeast locations
# and bind the map within these stations.
sw_loc = stations[['lat', 'lon']].dropna(axis=0).min().values.tolist()
ne_loc = stations[['lat', 'lon']].dropna(axis=0).max().values.tolist()
stations_map.fit_bounds([sw_loc, ne_loc])
stations = stations.transpose()
for station_index in stations:
# Collect each station's info.
station_info = stations[station_index]
# Create the html popup message for each station.
popup = folium.Popup(generate_popup_html(station_info, response))
if response['service']:
# Set the icon for each marker using the country's flag.
icon = folium.CustomIcon(icon_image=station_info.flag, icon_size=(20, 14))
else:
icon = folium.Icon(color='blue', icon_color='white', icon='info_sign')
# Add a marker for each station at the station's location
# along with the popup and the tooltip.
station_marker = folium.Marker(location=[station_info.lat, station_info.lon],
tooltip='<b>' + station_info.id + '</b>',
popup=popup,
icon=icon)
# Add the station marker to the cluster.
marker_cluster.add_child(station_marker)
# Add the cluster and the layer control to the folium map.
stations_map.add_child(marker_cluster)
stations_map.add_child(folium.LayerControl())
return stations_map
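# Hedged usage sketch (added for illustration, not part of the original module):
# builds a map from a tiny hand-made stations table with the columns that get()
# expects per its docstring. All station values below are made up.
def _example_stations_map():
    queried_stations = pd.DataFrame([
        {'uri': 'https://example.org/station/XYZ', 'id': 'XYZ',
         'name': 'Example station', 'country': 'SE', 'lat': 55.7, 'lon': 13.2,
         'elevation': 10.0, 'project': 'ICOS', 'theme': 'AS'},
    ])
    return get(queried_stations, project='ICOS')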
def request_rest_countries():
"""Requests data from rest-countries API.
This function uses https://restcountries.com/ API to request data
(names ,country-codes and flags) for countries.
Returns
-------
response : dict
Returns a dictionary with countries data obtained from
rest-countries API. The `service` key validates the source of
the data ('com', or False). If the request fails the `service`
key has a value of False.
Raises
------
HTTPError
An HTTPError exception is raised if the requested REST
countries data is unavailable.
SSLError
An SSLError exception is raised if the requested resources have
an untrusted SSL certificate.
"""
response = {'service': False}
response_com = None
try:
# Try to request countries data from
# https://restcountries.com/ REST-ful API.
response_com = requests.get(
'https://restcountries.com/v2/all?fields=name,flags,alpha2Code')
response_com.raise_for_status()
except (requests.exceptions.HTTPError, requests.exceptions.SSLError) as e:
print('Restcountries \'.com\' request error: ' + str(e))
if response_com:
response = {'service': 'com', 'data': response_com}
# If the request failed, the response will contain a False service
# value which is then used by the caller function to generate the
# folium map without any errors but with less information.
return response
def collect_rest_data(response):
"""Extracts raw rest-countries data from requested resources.
Parameters
----------
response : dict
If the requested resources were available the `response`
dictionary will contain the raw rest-countries data.
Returns
-------
response : dict
Returns an updated version of the `response` dictionary
obtained from `request_rest_countries()` function. If the
request was successful, this version will include the
rest-countries data that was extracted from the request.
"""
# Rest-countries resources are available.
if response['service']:
json_countries = json.loads(response['data'].text)
countries_data = {}
# Use the requested data to create a dictionary of country
# names, codes, and flags.
for country in json_countries:
code = country['alpha2Code']
country_name = country['name']
country_flag = country['flags']['svg']
countries_data[code] = {'name': country_name, 'flag': country_flag}
# Include the 'UK' alpha2code which is missing from
# restcountries API.
countries_data['UK'] = countries_data['GB']
# Add the created dictionary to the response.
response['countries_data'] = countries_data
return response
def edit_queried_stations(queried_stations, edited_response):
"""Applies new data on queried stations.
Uses the `edited_response` dictionary to add data to the queried
stations which are then appended to a new dataframe.
Parameters
----------
queried_stations : pandas.Dataframe
`queried_stations` dataframe includes each station's landing
page or (uri), id, name, country, lat, lon, elevation, project,
and theme.
edited_response: dict
`edited_response` is an updated version of the `response`
dictionary obtained from `request_rest_countries()` function.
If the request was successful, this version will include the
rest-countries data that was extracted using the
`collect_rest_data()` function.
Returns
-------
edited_stations : pandas.Dataframe
`edited_stations` is an updated version of the
`queried_stations` dataframe which was obtained using a sparql
query. Also this edited version is stripped off stations
without a fixed position (Instrumented ships of opportunity).
"""
edited_stations = | pd.DataFrame() | pandas.DataFrame |
import warnings
from itertools import product
import numpy as np
import pandas as pd
import pytest
from xarray import DataArray, Variable, coding, decode_cf
from xarray.coding.times import (
_import_cftime, cftime_to_nptime, decode_cf_datetime, encode_cf_datetime)
from xarray.conventions import _update_bounds_attributes
from xarray.core.common import contains_cftime_datetimes
from xarray.testing import assert_equal
from . import (
assert_array_equal, has_cftime, has_cftime_or_netCDF4, has_dask,
requires_cftime_or_netCDF4)
_NON_STANDARD_CALENDARS_SET = {'noleap', '365_day', '360_day',
'julian', 'all_leap', '366_day'}
_ALL_CALENDARS = sorted(_NON_STANDARD_CALENDARS_SET.union(
coding.times._STANDARD_CALENDARS))
_NON_STANDARD_CALENDARS = sorted(_NON_STANDARD_CALENDARS_SET)
_STANDARD_CALENDARS = sorted(coding.times._STANDARD_CALENDARS)
_CF_DATETIME_NUM_DATES_UNITS = [
(np.arange(10), 'days since 2000-01-01'),
(np.arange(10).astype('float64'), 'days since 2000-01-01'),
(np.arange(10).astype('float32'), 'days since 2000-01-01'),
(np.arange(10).reshape(2, 5), 'days since 2000-01-01'),
(12300 + np.arange(5), 'hours since 1680-01-01 00:00:00'),
# here we add a couple minor formatting errors to test
# the robustness of the parsing algorithm.
(12300 + np.arange(5), 'hour since 1680-01-01 00:00:00'),
(12300 + np.arange(5), 'Hour since 1680-01-01 00:00:00'),
(12300 + np.arange(5), ' Hour since 1680-01-01 00:00:00 '),
(10, 'days since 2000-01-01'),
([10], 'daYs since 2000-01-01'),
([[10]], 'days since 2000-01-01'),
([10, 10], 'days since 2000-01-01'),
(np.array(10), 'days since 2000-01-01'),
(0, 'days since 1000-01-01'),
([0], 'days since 1000-01-01'),
([[0]], 'days since 1000-01-01'),
(np.arange(2), 'days since 1000-01-01'),
(np.arange(0, 100000, 20000), 'days since 1900-01-01'),
(17093352.0, 'hours since 1-1-1 00:00:0.0'),
([0.5, 1.5], 'hours since 1900-01-01T00:00:00'),
(0, 'milliseconds since 2000-01-01T00:00:00'),
(0, 'microseconds since 2000-01-01T00:00:00'),
(np.int32(788961600), 'seconds since 1981-01-01'), # GH2002
(12300 + np.arange(5), 'hour since 1680-01-01 00:00:00.500000')
]
_CF_DATETIME_TESTS = [num_dates_units + (calendar,) for num_dates_units,
calendar in product(_CF_DATETIME_NUM_DATES_UNITS,
_STANDARD_CALENDARS)]
def _all_cftime_date_types():
try:
import cftime
except ImportError:
import netcdftime as cftime
return {'noleap': cftime.DatetimeNoLeap,
'365_day': cftime.DatetimeNoLeap,
'360_day': cftime.Datetime360Day,
'julian': cftime.DatetimeJulian,
'all_leap': cftime.DatetimeAllLeap,
'366_day': cftime.DatetimeAllLeap,
'gregorian': cftime.DatetimeGregorian,
'proleptic_gregorian': cftime.DatetimeProlepticGregorian}
@pytest.mark.skipif(not has_cftime_or_netCDF4, reason='cftime not installed')
@pytest.mark.parametrize(['num_dates', 'units', 'calendar'],
_CF_DATETIME_TESTS)
def test_cf_datetime(num_dates, units, calendar):
cftime = _import_cftime()
if cftime.__name__ == 'cftime':
expected = cftime.num2date(num_dates, units, calendar,
only_use_cftime_datetimes=True)
else:
expected = cftime.num2date(num_dates, units, calendar)
min_y = np.ravel(np.atleast_1d(expected))[np.nanargmin(num_dates)].year
max_y = np.ravel(np.atleast_1d(expected))[np.nanargmax(num_dates)].year
if min_y >= 1678 and max_y < 2262:
expected = cftime_to_nptime(expected)
with warnings.catch_warnings():
warnings.filterwarnings('ignore',
'Unable to decode time axis')
actual = coding.times.decode_cf_datetime(num_dates, units,
calendar)
abs_diff = np.atleast_1d(abs(actual - expected)).astype(np.timedelta64)
# once we no longer support versions of netCDF4 older than 1.1.5,
# we could do this check with near microsecond accuracy:
# https://github.com/Unidata/netcdf4-python/issues/355
assert (abs_diff <= np.timedelta64(1, 's')).all()
encoded, _, _ = coding.times.encode_cf_datetime(actual, units,
calendar)
if '1-1-1' not in units:
# pandas parses this date very strangely, so the original
# units/encoding cannot be preserved in this case:
# (Pdb) pd.to_datetime('1-1-1 00:00:0.0')
# Timestamp('2001-01-01 00:00:00')
assert_array_equal(num_dates, np.around(encoded, 1))
if (hasattr(num_dates, 'ndim') and num_dates.ndim == 1 and
'1000' not in units):
# verify that wrapping with a pandas.Index works
# note that it *does not* currently work to even put
# non-datetime64 compatible dates into a pandas.Index
encoded, _, _ = coding.times.encode_cf_datetime(
pd.Index(actual), units, calendar)
assert_array_equal(num_dates, np.around(encoded, 1))
@requires_cftime_or_netCDF4
def test_decode_cf_datetime_overflow():
# checks for
# https://github.com/pydata/pandas/issues/14068
# https://github.com/pydata/xarray/issues/975
try:
from cftime import DatetimeGregorian
except ImportError:
from netcdftime import DatetimeGregorian
datetime = DatetimeGregorian
units = 'days since 2000-01-01 00:00:00'
# date after 2262 and before 1678
days = (-117608, 95795)
expected = (datetime(1677, 12, 31), datetime(2262, 4, 12))
for i, day in enumerate(days):
with warnings.catch_warnings():
warnings.filterwarnings('ignore', 'Unable to decode time axis')
result = coding.times.decode_cf_datetime(day, units)
assert result == expected[i]
def test_decode_cf_datetime_non_standard_units():
expected = pd.date_range(periods=100, start='1970-01-01', freq='h')
# netCDFs from madis.noaa.gov use this format for their time units
# they cannot be parsed by cftime, but pd.Timestamp works
units = 'hours since 1-1-1970'
actual = coding.times.decode_cf_datetime(np.arange(100), units)
assert_array_equal(actual, expected)
@requires_cftime_or_netCDF4
def test_decode_cf_datetime_non_iso_strings():
# datetime strings that are _almost_ ISO compliant but not quite,
# but which cftime.num2date can still parse correctly
expected = pd.date_range(periods=100, start='2000-01-01', freq='h')
cases = [(np.arange(100), 'hours since 2000-01-01 0'),
(np.arange(100), 'hours since 2000-1-1 0'),
(np.arange(100), 'hours since 2000-01-01 0:00')]
for num_dates, units in cases:
actual = coding.times.decode_cf_datetime(num_dates, units)
abs_diff = abs(actual - expected.values)
# once we no longer support versions of netCDF4 older than 1.1.5,
# we could do this check with near microsecond accuracy:
# https://github.com/Unidata/netcdf4-python/issues/355
assert (abs_diff <= np.timedelta64(1, 's')).all()
@pytest.mark.skipif(not has_cftime_or_netCDF4, reason='cftime not installed')
@pytest.mark.parametrize('calendar', _STANDARD_CALENDARS)
def test_decode_standard_calendar_inside_timestamp_range(calendar):
cftime = _import_cftime()
units = 'days since 0001-01-01'
times = pd.date_range('2001-04-01-00', end='2001-04-30-23', freq='H')
time = cftime.date2num(times.to_pydatetime(), units, calendar=calendar)
expected = times.values
expected_dtype = np.dtype('M8[ns]')
actual = coding.times.decode_cf_datetime(time, units, calendar=calendar)
assert actual.dtype == expected_dtype
abs_diff = abs(actual - expected)
# once we no longer support versions of netCDF4 older than 1.1.5,
# we could do this check with near microsecond accuracy:
# https://github.com/Unidata/netcdf4-python/issues/355
assert (abs_diff <= np.timedelta64(1, 's')).all()
@pytest.mark.skipif(not has_cftime_or_netCDF4, reason='cftime not installed')
@pytest.mark.parametrize('calendar', _NON_STANDARD_CALENDARS)
def test_decode_non_standard_calendar_inside_timestamp_range(
calendar):
cftime = _import_cftime()
units = 'days since 0001-01-01'
times = pd.date_range('2001-04-01-00', end='2001-04-30-23',
freq='H')
non_standard_time = cftime.date2num(
times.to_pydatetime(), units, calendar=calendar)
if cftime.__name__ == 'cftime':
expected = cftime.num2date(
non_standard_time, units, calendar=calendar,
only_use_cftime_datetimes=True)
else:
expected = cftime.num2date(non_standard_time, units,
calendar=calendar)
expected_dtype = np.dtype('O')
actual = coding.times.decode_cf_datetime(
non_standard_time, units, calendar=calendar)
assert actual.dtype == expected_dtype
abs_diff = abs(actual - expected)
# once we no longer support versions of netCDF4 older than 1.1.5,
# we could do this check with near microsecond accuracy:
# https://github.com/Unidata/netcdf4-python/issues/355
assert (abs_diff <= np.timedelta64(1, 's')).all()
@pytest.mark.skipif(not has_cftime_or_netCDF4, reason='cftime not installed')
@pytest.mark.parametrize('calendar', _ALL_CALENDARS)
def test_decode_dates_outside_timestamp_range(calendar):
from datetime import datetime
cftime = _import_cftime()
units = 'days since 0001-01-01'
times = [datetime(1, 4, 1, h) for h in range(1, 5)]
time = cftime.date2num(times, units, calendar=calendar)
if cftime.__name__ == 'cftime':
expected = cftime.num2date(time, units, calendar=calendar,
only_use_cftime_datetimes=True)
else:
expected = cftime.num2date(time, units, calendar=calendar)
expected_date_type = type(expected[0])
with warnings.catch_warnings():
warnings.filterwarnings('ignore', 'Unable to decode time axis')
actual = coding.times.decode_cf_datetime(
time, units, calendar=calendar)
assert all(isinstance(value, expected_date_type) for value in actual)
abs_diff = abs(actual - expected)
# once we no longer support versions of netCDF4 older than 1.1.5,
# we could do this check with near microsecond accuracy:
# https://github.com/Unidata/netcdf4-python/issues/355
assert (abs_diff <= np.timedelta64(1, 's')).all()
@pytest.mark.skipif(not has_cftime_or_netCDF4, reason='cftime not installed')
@pytest.mark.parametrize('calendar', _STANDARD_CALENDARS)
def test_decode_standard_calendar_single_element_inside_timestamp_range(
calendar):
units = 'days since 0001-01-01'
for num_time in [735368, [735368], [[735368]]]:
with warnings.catch_warnings():
warnings.filterwarnings('ignore',
'Unable to decode time axis')
actual = coding.times.decode_cf_datetime(
num_time, units, calendar=calendar)
assert actual.dtype == np.dtype('M8[ns]')
@pytest.mark.skipif(not has_cftime_or_netCDF4, reason='cftime not installed')
@pytest.mark.parametrize('calendar', _NON_STANDARD_CALENDARS)
def test_decode_non_standard_calendar_single_element_inside_timestamp_range(
calendar):
units = 'days since 0001-01-01'
for num_time in [735368, [735368], [[735368]]]:
with warnings.catch_warnings():
warnings.filterwarnings('ignore',
'Unable to decode time axis')
actual = coding.times.decode_cf_datetime(
num_time, units, calendar=calendar)
assert actual.dtype == np.dtype('O')
@pytest.mark.skipif(not has_cftime_or_netCDF4, reason='cftime not installed')
@pytest.mark.parametrize('calendar', _NON_STANDARD_CALENDARS)
def test_decode_single_element_outside_timestamp_range(
calendar):
cftime = _import_cftime()
units = 'days since 0001-01-01'
for days in [1, 1470376]:
for num_time in [days, [days], [[days]]]:
with warnings.catch_warnings():
warnings.filterwarnings('ignore',
'Unable to decode time axis')
actual = coding.times.decode_cf_datetime(
num_time, units, calendar=calendar)
if cftime.__name__ == 'cftime':
expected = cftime.num2date(days, units, calendar,
only_use_cftime_datetimes=True)
else:
expected = cftime.num2date(days, units, calendar)
assert isinstance(actual.item(), type(expected))
@pytest.mark.skipif(not has_cftime_or_netCDF4, reason='cftime not installed')
@pytest.mark.parametrize('calendar', _STANDARD_CALENDARS)
def test_decode_standard_calendar_multidim_time_inside_timestamp_range(
calendar):
cftime = _import_cftime()
units = 'days since 0001-01-01'
times1 = pd.date_range('2001-04-01', end='2001-04-05', freq='D')
times2 = pd.date_range('2001-05-01', end='2001-05-05', freq='D')
time1 = cftime.date2num(times1.to_pydatetime(),
units, calendar=calendar)
time2 = cftime.date2num(times2.to_pydatetime(),
units, calendar=calendar)
mdim_time = np.empty((len(time1), 2), )
mdim_time[:, 0] = time1
mdim_time[:, 1] = time2
expected1 = times1.values
expected2 = times2.values
actual = coding.times.decode_cf_datetime(
mdim_time, units, calendar=calendar)
assert actual.dtype == np.dtype('M8[ns]')
abs_diff1 = abs(actual[:, 0] - expected1)
abs_diff2 = abs(actual[:, 1] - expected2)
# once we no longer support versions of netCDF4 older than 1.1.5,
# we could do this check with near microsecond accuracy:
# https://github.com/Unidata/netcdf4-python/issues/355
assert (abs_diff1 <= np.timedelta64(1, 's')).all()
assert (abs_diff2 <= np.timedelta64(1, 's')).all()
@pytest.mark.skipif(not has_cftime_or_netCDF4, reason='cftime not installed')
@pytest.mark.parametrize('calendar', _NON_STANDARD_CALENDARS)
def test_decode_nonstandard_calendar_multidim_time_inside_timestamp_range(
calendar):
cftime = _import_cftime()
units = 'days since 0001-01-01'
times1 = pd.date_range('2001-04-01', end='2001-04-05', freq='D')
times2 = pd.date_range('2001-05-01', end='2001-05-05', freq='D')
time1 = cftime.date2num(times1.to_pydatetime(),
units, calendar=calendar)
time2 = cftime.date2num(times2.to_pydatetime(),
units, calendar=calendar)
mdim_time = np.empty((len(time1), 2), )
mdim_time[:, 0] = time1
mdim_time[:, 1] = time2
if cftime.__name__ == 'cftime':
expected1 = cftime.num2date(time1, units, calendar,
only_use_cftime_datetimes=True)
expected2 = cftime.num2date(time2, units, calendar,
only_use_cftime_datetimes=True)
else:
expected1 = cftime.num2date(time1, units, calendar)
expected2 = cftime.num2date(time2, units, calendar)
expected_dtype = np.dtype('O')
actual = coding.times.decode_cf_datetime(
mdim_time, units, calendar=calendar)
assert actual.dtype == expected_dtype
abs_diff1 = abs(actual[:, 0] - expected1)
abs_diff2 = abs(actual[:, 1] - expected2)
# once we no longer support versions of netCDF4 older than 1.1.5,
# we could do this check with near microsecond accuracy:
# https://github.com/Unidata/netcdf4-python/issues/355
assert (abs_diff1 <= np.timedelta64(1, 's')).all()
assert (abs_diff2 <= np.timedelta64(1, 's')).all()
@pytest.mark.skipif(not has_cftime_or_netCDF4, reason='cftime not installed')
@pytest.mark.parametrize('calendar', _ALL_CALENDARS)
def test_decode_multidim_time_outside_timestamp_range(
calendar):
from datetime import datetime
cftime = _import_cftime()
units = 'days since 0001-01-01'
times1 = [datetime(1, 4, day) for day in range(1, 6)]
times2 = [datetime(1, 5, day) for day in range(1, 6)]
time1 = cftime.date2num(times1, units, calendar=calendar)
time2 = cftime.date2num(times2, units, calendar=calendar)
mdim_time = np.empty((len(time1), 2), )
mdim_time[:, 0] = time1
mdim_time[:, 1] = time2
if cftime.__name__ == 'cftime':
expected1 = cftime.num2date(time1, units, calendar,
only_use_cftime_datetimes=True)
expected2 = cftime.num2date(time2, units, calendar,
only_use_cftime_datetimes=True)
else:
expected1 = cftime.num2date(time1, units, calendar)
expected2 = cftime.num2date(time2, units, calendar)
with warnings.catch_warnings():
warnings.filterwarnings('ignore', 'Unable to decode time axis')
actual = coding.times.decode_cf_datetime(
mdim_time, units, calendar=calendar)
assert actual.dtype == np.dtype('O')
abs_diff1 = abs(actual[:, 0] - expected1)
abs_diff2 = abs(actual[:, 1] - expected2)
# once we no longer support versions of netCDF4 older than 1.1.5,
# we could do this check with near microsecond accuracy:
# https://github.com/Unidata/netcdf4-python/issues/355
assert (abs_diff1 <= np.timedelta64(1, 's')).all()
assert (abs_diff2 <= np.timedelta64(1, 's')).all()
@pytest.mark.skipif(not has_cftime_or_netCDF4, reason='cftime not installed')
@pytest.mark.parametrize('calendar', ['360_day', 'all_leap', '366_day'])
def test_decode_non_standard_calendar_single_element(
calendar):
cftime = _import_cftime()
units = 'days since 0001-01-01'
try:
dt = cftime.netcdftime.datetime(2001, 2, 29)
except AttributeError:
# Must be using the standalone cftime library
dt = cftime.datetime(2001, 2, 29)
num_time = cftime.date2num(dt, units, calendar)
actual = coding.times.decode_cf_datetime(
num_time, units, calendar=calendar)
if cftime.__name__ == 'cftime':
expected = np.asarray(cftime.num2date(
num_time, units, calendar, only_use_cftime_datetimes=True))
else:
expected = np.asarray(cftime.num2date(num_time, units, calendar))
assert actual.dtype == np.dtype('O')
assert expected == actual
@pytest.mark.skipif(not has_cftime_or_netCDF4, reason='cftime not installed')
def test_decode_360_day_calendar():
cftime = _import_cftime()
calendar = '360_day'
# ensure leap year doesn't matter
for year in [2010, 2011, 2012, 2013, 2014]:
units = 'days since {0}-01-01'.format(year)
num_times = np.arange(100)
if cftime.__name__ == 'cftime':
expected = cftime.num2date(num_times, units, calendar,
only_use_cftime_datetimes=True)
else:
expected = cftime.num2date(num_times, units, calendar)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
actual = coding.times.decode_cf_datetime(
num_times, units, calendar=calendar)
assert len(w) == 0
assert actual.dtype == np.dtype('O')
assert_array_equal(actual, expected)
@pytest.mark.skipif(not has_cftime_or_netCDF4, reason='cftime not installed')
@pytest.mark.parametrize(
['num_dates', 'units', 'expected_list'],
[([np.nan], 'days since 2000-01-01', ['NaT']),
([np.nan, 0], 'days since 2000-01-01',
['NaT', '2000-01-01T00:00:00Z']),
([np.nan, 0, 1], 'days since 2000-01-01',
['NaT', '2000-01-01T00:00:00Z', '2000-01-02T00:00:00Z'])])
def test_cf_datetime_nan(num_dates, units, expected_list):
with warnings.catch_warnings():
warnings.filterwarnings('ignore', 'All-NaN')
actual = coding.times.decode_cf_datetime(num_dates, units)
# use pandas because numpy will deprecate timezone-aware conversions
expected = pd.to_datetime(expected_list)
assert_array_equal(expected, actual)
@requires_cftime_or_netCDF4
def test_decoded_cf_datetime_array_2d():
# regression test for GH1229
variable = Variable(('x', 'y'), np.array([[0, 1], [2, 3]]),
{'units': 'days since 2000-01-01'})
result = coding.times.CFDatetimeCoder().decode(variable)
assert result.dtype == 'datetime64[ns]'
expected = pd.date_range('2000-01-01', periods=4).values.reshape(2, 2)
assert_array_equal(np.asarray(result), expected)
@pytest.mark.parametrize(
['dates', 'expected'],
[(pd.date_range('1900-01-01', periods=5),
'days since 1900-01-01 00:00:00'),
(pd.date_range('1900-01-01 12:00:00', freq='H',
periods=2),
'hours since 1900-01-01 12:00:00'),
(pd.to_datetime(
['1900-01-01', '1900-01-02', 'NaT']),
'days since 1900-01-01 00:00:00'),
(pd.to_datetime(['1900-01-01',
'1900-01-02T00:00:00.005']),
'seconds since 1900-01-01 00:00:00'),
(pd.to_datetime(['NaT', '1900-01-01']),
'days since 1900-01-01 00:00:00'),
(pd.to_datetime(['NaT']),
'days since 1970-01-01 00:00:00')])
def test_infer_datetime_units(dates, expected):
assert expected == coding.times.infer_datetime_units(dates)
_CFTIME_DATETIME_UNITS_TESTS = [
([(1900, 1, 1), (1900, 1, 1)], 'days since 1900-01-01 00:00:00.000000'),
([(1900, 1, 1), (1900, 1, 2), (1900, 1, 2, 0, 0, 1)],
'seconds since 1900-01-01 00:00:00.000000'),
([(1900, 1, 1), (1900, 1, 8), (1900, 1, 16)],
'days since 1900-01-01 00:00:00.000000')
]
@pytest.mark.skipif(not has_cftime_or_netCDF4, reason='cftime not installed')
@pytest.mark.parametrize(
'calendar', _NON_STANDARD_CALENDARS + ['gregorian', 'proleptic_gregorian'])
@pytest.mark.parametrize(('date_args', 'expected'),
_CFTIME_DATETIME_UNITS_TESTS)
def test_infer_cftime_datetime_units(calendar, date_args, expected):
date_type = _all_cftime_date_types()[calendar]
dates = [date_type(*args) for args in date_args]
assert expected == coding.times.infer_datetime_units(dates)
@pytest.mark.parametrize(
['timedeltas', 'units', 'numbers'],
[
('1D', 'days', np.int64(1)),
(['1D', '2D', '3D'], 'days', np.array([1, 2, 3], 'int64')),
('1h', 'hours', np.int64(1)),
('1ms', 'milliseconds', np.int64(1)),
('1us', 'microseconds', np.int64(1)),
(['NaT', '0s', '1s'], None, [np.nan, 0, 1]),
(['30m', '60m'], 'hours', [0.5, 1.0]),
('NaT', 'days', np.nan),
(['NaT', 'NaT'], 'days', [np.nan, np.nan]),
])
def test_cf_timedelta(timedeltas, units, numbers):
if timedeltas == 'NaT':
timedeltas = np.timedelta64('NaT', 'ns')
else:
timedeltas = pd.to_timedelta(timedeltas, box=False)
numbers = np.array(numbers)
expected = numbers
actual, _ = coding.times.encode_cf_timedelta(timedeltas, units)
assert_array_equal(expected, actual)
assert expected.dtype == actual.dtype
if units is not None:
expected = timedeltas
actual = coding.times.decode_cf_timedelta(numbers, units)
assert_array_equal(expected, actual)
assert expected.dtype == actual.dtype
expected = np.timedelta64('NaT', 'ns')
actual = coding.times.decode_cf_timedelta(np.array(np.nan), 'days')
assert_array_equal(expected, actual)
def test_cf_timedelta_2d():
timedeltas = ['1D', '2D', '3D']
units = 'days'
numbers = np.atleast_2d([1, 2, 3])
timedeltas = np.atleast_2d(pd.to_timedelta(timedeltas, box=False))
expected = timedeltas
actual = coding.times.decode_cf_timedelta(numbers, units)
assert_array_equal(expected, actual)
assert expected.dtype == actual.dtype
@pytest.mark.parametrize(
['deltas', 'expected'],
[(pd.to_timedelta(['1 day', '2 days']), 'days'),
(pd.to_timedelta(['1h', '1 day 1 hour']), 'hours'),
(pd.to_timedelta(['1m', '2m', np.nan]), 'minutes'),
(pd.to_timedelta(['1m3s', '1m4s']), 'seconds')])
def test_infer_timedelta_units(deltas, expected):
assert expected == coding.times.infer_timedelta_units(deltas)
@pytest.mark.skipif(not has_cftime_or_netCDF4, reason='cftime not installed')
@pytest.mark.parametrize(['date_args', 'expected'],
[((1, 2, 3, 4, 5, 6),
'0001-02-03 04:05:06.000000'),
((10, 2, 3, 4, 5, 6),
'0010-02-03 04:05:06.000000'),
((100, 2, 3, 4, 5, 6),
'0100-02-03 04:05:06.000000'),
((1000, 2, 3, 4, 5, 6),
'1000-02-03 04:05:06.000000')])
def test_format_cftime_datetime(date_args, expected):
date_types = _all_cftime_date_types()
for date_type in date_types.values():
result = coding.times.format_cftime_datetime(date_type(*date_args))
assert result == expected
@pytest.mark.parametrize('calendar', _ALL_CALENDARS)
def test_decode_cf(calendar):
days = [1., 2., 3.]
da = DataArray(days, coords=[days], dims=['time'], name='test')
ds = da.to_dataset()
for v in ['test', 'time']:
ds[v].attrs['units'] = 'days since 2001-01-01'
ds[v].attrs['calendar'] = calendar
if not has_cftime_or_netCDF4 and calendar not in _STANDARD_CALENDARS:
with pytest.raises(ValueError):
ds = decode_cf(ds)
else:
ds = decode_cf(ds)
if calendar not in _STANDARD_CALENDARS:
assert ds.test.dtype == np.dtype('O')
else:
assert ds.test.dtype == np.dtype('M8[ns]')
def test_decode_cf_time_bounds():
da = DataArray(np.arange(6, dtype='int64').reshape((3, 2)),
coords={'time': [1, 2, 3]},
dims=('time', 'nbnd'), name='time_bnds')
attrs = {'units': 'days since 2001-01',
'calendar': 'standard',
'bounds': 'time_bnds'}
ds = da.to_dataset()
ds['time'].attrs.update(attrs)
_update_bounds_attributes(ds.variables)
assert ds.variables['time_bnds'].attrs == {'units': 'days since 2001-01',
'calendar': 'standard'}
dsc = decode_cf(ds)
assert dsc.time_bnds.dtype == np.dtype('M8[ns]')
dsc = decode_cf(ds, decode_times=False)
assert dsc.time_bnds.dtype == np.dtype('int64')
# Do not overwrite existing attrs
ds = da.to_dataset()
ds['time'].attrs.update(attrs)
bnd_attr = {'units': 'hours since 2001-01', 'calendar': 'noleap'}
ds['time_bnds'].attrs.update(bnd_attr)
_update_bounds_attributes(ds.variables)
assert ds.variables['time_bnds'].attrs == bnd_attr
# If bounds variable not available do not complain
ds = da.to_dataset()
ds['time'].attrs.update(attrs)
ds['time'].attrs['bounds'] = 'fake_var'
_update_bounds_attributes(ds.variables)
@pytest.fixture(params=_ALL_CALENDARS)
def calendar(request):
return request.param
@pytest.fixture()
def times(calendar):
cftime = _import_cftime()
return cftime.num2date(
np.arange(4), units='hours since 2000-01-01', calendar=calendar,
only_use_cftime_datetimes=True)
@pytest.fixture()
def data(times):
data = np.random.rand(2, 2, 4)
lons = np.linspace(0, 11, 2)
lats = np.linspace(0, 20, 2)
return DataArray(data, coords=[lons, lats, times],
dims=['lon', 'lat', 'time'], name='data')
@pytest.fixture()
def times_3d(times):
lons = np.linspace(0, 11, 2)
lats = np.linspace(0, 20, 2)
times_arr = np.random.choice(times, size=(2, 2, 4))
return DataArray(times_arr, coords=[lons, lats, times],
dims=['lon', 'lat', 'time'],
name='data')
@pytest.mark.skipif(not has_cftime, reason='cftime not installed')
def test_contains_cftime_datetimes_1d(data):
assert contains_cftime_datetimes(data.time)
@pytest.mark.skipif(not has_dask, reason='dask not installed')
@pytest.mark.skipif(not has_cftime, reason='cftime not installed')
def test_contains_cftime_datetimes_dask_1d(data):
assert contains_cftime_datetimes(data.time.chunk())
@pytest.mark.skipif(not has_cftime, reason='cftime not installed')
def test_contains_cftime_datetimes_3d(times_3d):
assert contains_cftime_datetimes(times_3d)
@pytest.mark.skipif(not has_dask, reason='dask not installed')
@pytest.mark.skipif(not has_cftime, reason='cftime not installed')
def test_contains_cftime_datetimes_dask_3d(times_3d):
assert contains_cftime_datetimes(times_3d.chunk())
@pytest.mark.parametrize('non_cftime_data', [DataArray([]), DataArray([1, 2])])
def test_contains_cftime_datetimes_non_cftimes(non_cftime_data):
assert not contains_cftime_datetimes(non_cftime_data)
@pytest.mark.skipif(not has_dask, reason='dask not installed')
@pytest.mark.parametrize('non_cftime_data', [DataArray([]), DataArray([1, 2])])
def test_contains_cftime_datetimes_non_cftimes_dask(non_cftime_data):
assert not contains_cftime_datetimes(non_cftime_data.chunk())
@pytest.mark.skipif(not has_cftime_or_netCDF4, reason='cftime not installed')
@pytest.mark.parametrize('shape', [(24,), (8, 3), (2, 4, 3)])
def test_encode_cf_datetime_overflow(shape):
# Test for fix to GH 2272
dates = pd.date_range('2100', periods=24).values.reshape(shape)
units = 'days since 1800-01-01'
calendar = 'standard'
num, _, _ = encode_cf_datetime(dates, units, calendar)
roundtrip = decode_cf_datetime(num, units, calendar)
np.testing.assert_array_equal(dates, roundtrip)
def test_encode_cf_datetime_pandas_min():
# Test that encode_cf_datetime does not fail for versions
# of pandas < 0.21.1 (GH 2623).
dates = | pd.date_range('2000', periods=3) | pandas.date_range |
"""
Script to run one-vs-rest cancer type classification, with stratified train and
test sets, for all provided TCGA cancer types.
"""
import sys
import argparse
from pathlib import Path
import numpy as np
import pandas as pd
from tqdm import tqdm
import mpmp.config as cfg
from mpmp.data_models.tcga_data_model import TCGADataModel
from mpmp.exceptions import (
ResultsFileExistsError,
NoTrainSamplesError,
NoTestSamplesError,
OneClassError,
)
from mpmp.prediction.cross_validation import run_cv_stratified
import mpmp.utilities.data_utilities as du
import mpmp.utilities.file_utilities as fu
from mpmp.utilities.tcga_utilities import get_overlap_data_types
def process_args():
"""Parse and format command line arguments."""
parser = argparse.ArgumentParser()
# argument group for parameters related to input/output
# (e.g. filenames, logging/verbosity options, target genes)
#
# these don't affect the model output, and thus don't need to be saved
# with the results of the experiment
io = parser.add_argument_group('io',
'arguments related to script input/output, '
'note these will *not* be saved in metadata ')
io.add_argument('--cancer_types', nargs='*',
help='cancer types to predict, if not included predict '
'all cancer types in TCGA')
io.add_argument('--log_file', default=None,
help='name of file to log skipped cancer types to')
io.add_argument('--output_preds', action='store_true')
io.add_argument('--results_dir', default=cfg.results_dirs['cancer_type'],
help='where to write results to')
io.add_argument('--verbose', action='store_true')
# argument group for parameters related to model training/evaluation
# (e.g. model hyperparameters, preprocessing options)
#
# these affect the output of the model, so we want to save them in the
# same directory as the experiment results
opts = parser.add_argument_group('model_options',
'parameters for training/evaluating model, '
'these will affect output and are saved as '
'experiment metadata ')
opts.add_argument('--debug', action='store_true',
help='use subset of data for fast debugging')
opts.add_argument('--num_folds', type=int, default=4,
help='number of folds of cross-validation to run')
opts.add_argument('--seed', type=int, default=cfg.default_seed)
opts.add_argument('--subset_mad_genes', type=int, default=cfg.num_features_raw,
help='if included, subset gene features to this number of '
'features having highest mean absolute deviation')
opts.add_argument('--training_data', type=str, default='expression',
choices=list(cfg.data_types.keys()),
help='what data type to train model on')
args = parser.parse_args()
args.results_dir = Path(args.results_dir).resolve()
if args.log_file is None:
args.log_file = Path(args.results_dir, 'log_skipped.tsv').resolve()
# check that all provided cancer types are valid TCGA acronyms
sample_info_df = du.load_sample_info(args.training_data, args.verbose)
tcga_cancer_types = list(np.unique(sample_info_df.cancer_type))
if args.cancer_types is None:
args.cancer_types = tcga_cancer_types
else:
not_in_tcga = set(args.cancer_types) - set(tcga_cancer_types)
if len(not_in_tcga) > 0:
parser.error('some cancer types not present in TCGA: {}'.format(
' '.join(not_in_tcga)))
# split args into defined argument groups, since we'll use them differently
arg_groups = du.split_argument_groups(args, parser)
io_args, model_options = arg_groups['io'], arg_groups['model_options']
# add some additional hyperparameters/ranges from config file to model options
# these shouldn't be changed by the user, so they aren't added as arguments
model_options.n_dim = None
model_options.alphas = cfg.alphas
model_options.l1_ratios = cfg.l1_ratios
model_options.standardize_data_types = cfg.standardize_data_types
# add information about valid samples to model options
model_options.sample_overlap_data_types = list(
get_overlap_data_types(use_subsampled=model_options.debug).keys()
)
return io_args, model_options, sample_info_df
if __name__ == '__main__':
# process command line arguments
io_args, model_options, sample_info_df = process_args()
# create results dir and subdir for experiment if they don't exist
experiment_dir = Path(io_args.results_dir, 'cancer_type').resolve()
experiment_dir.mkdir(parents=True, exist_ok=True)
# save model options for this experiment
# (hyperparameters, preprocessing info, etc)
fu.save_model_options(experiment_dir, model_options)
# create empty error log file if it doesn't exist
log_columns = [
'cancer_type',
'training_data',
'shuffle_labels',
'skip_reason'
]
if io_args.log_file.exists() and io_args.log_file.is_file():
log_df = | pd.read_csv(io_args.log_file, sep='\t') | pandas.read_csv |
import pandas as pd
from xlsx2csv import Xlsx2csv
import sys
daytoprocess = sys.argv[1]
print("Load xlsx and transform to csv")
xls_file = pd.ExcelFile("../data/pge/xlsx/"+daytoprocess+".xlsx")
sheet_names = xls_file.sheet_names
print("Process file and concat info in one dataframe")
dffinal = pd.DataFrame(columns=['nombre', 'montant', 'code_section','code_departement'])
for sheet in sheet_names:
if(sheet != 'Récap'):
print(sheet)
df = xls_file.parse(sheet)
#PATCH
#dfnaf = df[28:49]
dfnaf = df[28:50]
dfnaf = dfnaf.rename(columns={dfnaf.columns[0]:"section_naf",dfnaf.columns[1]:"nombre",dfnaf.columns[3]:"montant"})
dfnaf = dfnaf[['section_naf','nombre','montant']]
dfnaf['montant'] = dfnaf['montant'].apply(lambda x: 0 if x == 'ND' else 0 if x == 'NC' else x * 1000000)
dfnaf = dfnaf.reset_index()
dfnaf = dfnaf.drop(columns={'index'})
# PATCH
#sectioncode = "ABCDEFGHIJKLMNOPQRSZX"
sectioncode = "ABCDEFGHIJKLMNOPQRSTZX"
i = 0
naf = []
for index,row in dfnaf.iterrows():
row['code_section'] = sectioncode[i]
i = i +1
naf.append(row)
newdfnaf = pd.DataFrame(naf)
newdfnaf
totalnb = 0
totalm = 0
arr = []
for index,row in newdfnaf.iterrows():
if(row['nombre'] == 'ND'):
row['nombre'] = 0
if(row['nombre'] == 'NC'):
row['nombre'] = 0
if(row['nombre'] != row['nombre']):
row['nombre'] = 0
if(row['montant'] == 'ND'):
row['montant'] = 0
if(row['montant'] != row['montant']):
row['montant'] = 0
if(row['code_section'] != 'X'):
totalnb = totalnb + row['nombre']
totalm = totalm + row['montant']
arr.append(row)
newdfnaf = pd.DataFrame(arr)
newdfnaf
if(newdfnaf[newdfnaf['code_section'] == 'Z'].shape[0] > 0):
if(newdfnaf[newdfnaf['code_section'] == 'Z'].iloc[0]['montant'] != newdfnaf[newdfnaf['code_section'] == 'Z'].iloc[0]['montant']):
newdfnaf.loc[newdfnaf['code_section'] == 'Z', "montant"] = 0
if(newdfnaf[newdfnaf['code_section'] == 'Z'].iloc[0]['nombre'] != newdfnaf[newdfnaf['code_section'] == 'Z'].iloc[0]['nombre']):
newdfnaf.loc[newdfnaf['code_section'] == 'Z', "nombre"] = 0
newdfnaf.loc[newdfnaf['code_section'] == 'Z', "montant"] = newdfnaf[newdfnaf['code_section'] == 'Z'].iloc[0]['montant'] + newdfnaf[newdfnaf['code_section'] == 'X'].iloc[0]['montant'] - totalm
newdfnaf.loc[newdfnaf['code_section'] == 'Z', "nombre"] = newdfnaf[newdfnaf['code_section'] == 'Z'].iloc[0]['nombre'] + newdfnaf[newdfnaf['code_section'] == 'X'].iloc[0]['nombre'] - totalnb
newdfnaf = newdfnaf[:-1]
newdfnaf = newdfnaf[newdfnaf['nombre'].notna()]
newdfnaf = newdfnaf.drop(columns={'section_naf'})
newdfnaf['code_departement'] = sheet
newdfnaf
dffinal = dffinal.append(newdfnaf, ignore_index=True)
print("Clean dataframe")
dffinal['code_departement'] = dffinal['code_departement'].apply(lambda x : '971' if x == '951' else '972' if x == '953' else '973' if x == '952' else '974' if x == '957' else '976' if x == '954' else x)
# PATCH TMP
#dffinal['code_departement'] = dffinal['code_departement'].apply(lambda x : '999' if x == '972' else x)
#dffinal['code_departement'] = dffinal['code_departement'].apply(lambda x : '972' if x == '973' else x)
#dffinal['code_departement'] = dffinal['code_departement'].apply(lambda x : '973' if x == '999' else x)
dffinal = dffinal[['code_departement','code_section','nombre','montant']]
dffinal['last_update'] = daytoprocess
dffinal = dffinal.rename(columns={'code_departement':'dep'})
print("Add Region")
dep = pd.read_csv("../utils/departement2019.csv",dtype={'dep':str,'reg':str})
dep = dep[['dep','reg']]
dffinal = | pd.merge(dffinal, dep, on='dep',how='left') | pandas.merge |
import pandas as pd
import numpy as np
from pyomo import environ
from pyomo.environ import *
def optimization_MIP(model,
x, ## decision variables (already attached to model)
model_master, ## master table that specifies learned functions for constraints (and parameters)
data, ## dataframe holding all data to be used for convex hull
max_violation=None, ## parameter for RF model allowable violation proportion (between 0-1)
tr=True, ## bool variable for the use of trust region constraints
clustering_model=None): ## trained clustering algorithm using the entire data (only active if tr = True)
def logistic_x(proba):
if proba == 0:
proba = 0.00001
if proba == 1:
proba = 0.99999
return - np.log(1 / proba - 1)
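    # Note (added for clarity): logistic_x is the inverse of the sigmoid (the
    # logit), with probabilities clipped away from 0 and 1 to keep it finite.
    # For example, a probability bound of 0.8 maps to -log(1/0.8 - 1) ~= 1.386,
    # which is how binary lb/ub bounds are translated to the decision-function
    # scale in the constraint builders below.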
def constraints_linear(model, outcome, task, coefficients, lb=None, ub=None, weight_objective=0, SCM=None, features=None):
'''
Embed a trained linear predictive model for 'outcome' into the master 'model'.
'Coefficients' is a model file generated by the constraint_extrapolation_skEN() function.
'lb/ub' specify the lower/upper bounds if 'outcome' is to be incorporated as a constraint.
'weight_objective' specifies the weight to use if incorporating 'outcome' as a term in the objective.
'''
        # Single-row model file: the row holds the intercept and one coefficient per feature
intercept = coefficients['intercept'][0]
coeff = coefficients.drop(['intercept'], axis=1, inplace=False).loc[0, :]
model.add_component('LR'+outcome, Constraint(expr=model.y[outcome] == sum(model.x[i] * coeff.loc[i] for i in pd.DataFrame(coeff).index) + intercept))
if weight_objective != 0:
model.OBJ.set_value(expr=model.OBJ.expr + weight_objective * model.y[outcome])
elif not pd.isna(SCM):
model.add_component('scm_' + outcome, Constraint(expr=model.y[outcome] == SCM + model.x[outcome]))
else:
if not pd.isna(ub):
if task == 'binary':
ub = logistic_x(proba=ub)
model.add_component('ub_' + outcome, Constraint(expr=model.y[outcome] <= ub))
if not pd.isna(lb):
if task == 'binary':
lb = logistic_x(proba=lb)
model.add_component('lb_' + outcome, Constraint(expr=model.y[outcome] >= lb))
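    # Hedged sketch (illustration only) of the single-row coefficients file
    # that constraints_linear expects: an 'intercept' column plus one column
    # per decision-variable feature, e.g.
    #   pd.DataFrame({'intercept': [0.5], 'x1': [1.2], 'x2': [-0.3]})
    # which embeds model.y[outcome] == 1.2*x['x1'] - 0.3*x['x2'] + 0.5.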
def constraints_svm(model, outcome, task, coefficients, lb=None, ub=None, weight_objective=0, SCM=None, features=None):
'''
Embed a trained SVM predictive model for 'outcome' into the master 'model'.
'Coefficients' is a model file generated by the constraint_extrapolation_skSVM() function.
'lb/ub' specify the lower/upper bounds if 'outcome' is to be incorporated as a constraint.
'weight_objective' specifies the weight to use if incorporating 'outcome' as a term in the objective.
'''
        # Single-row model file: the row holds the intercept and one coefficient per feature
intercept = coefficients['intercept'][0]
coeff = coefficients.drop(['intercept'], axis=1, inplace=False).loc[0, :]
# Set y to decision function
model.add_component('SVM'+outcome, Constraint(expr=model.y[outcome] == sum(model.x[i] * coeff.loc[i] for i in features) + intercept))
# Set y to binary: 1 if expr >= 0, else 0
# model.add_component('SVM_lb'+outcome, Constraint(expr=model.y[outcome] >= 1/M*(sum(model.x[i] * coeff.loc[i] for i in features) + intercept)))
if weight_objective != 0:
model.OBJ.set_value(expr=model.OBJ.expr + weight_objective * model.y[outcome])
elif not pd.isna(SCM):
model.add_component('scm_' + outcome, Constraint(expr=model.y[outcome] == SCM + model.x[outcome]))
else:
if task == "continuous":
if not pd.isna(ub):
model.add_component('ub_' + outcome, Constraint(expr=model.y[outcome] <= ub))
if not | pd.isna(lb) | pandas.isna |
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 16 09:50:42 2019
@author: michaelek
"""
import os
import numpy as np
import pandas as pd
import yaml
from allotools.data_io import get_permit_data, get_usage_data, allo_filter
from allotools.allocation_ts import allo_ts
from allotools.utils import grp_ts_agg
# from allotools.plot import plot_group as pg
# from allotools.plot import plot_stacked as ps
from datetime import datetime
# from scipy.special import erfc
# from matplotlib.pyplot import show
#########################################
### parameters
base_path = os.path.realpath(os.path.dirname(__file__))
with open(os.path.join(base_path, 'parameters.yml')) as param:
param = yaml.safe_load(param)
pk = ['permit_id', 'wap', 'date']
dataset_types = ['allo', 'metered_allo', 'usage', 'usage_est']
allo_type_dict = {'D': 'max_daily_volume', 'W': 'max_daily_volume', 'M': 'max_annual_volume', 'A-JUN': 'max_annual_volume', 'A': 'max_annual_volume'}
# allo_mult_dict = {'D': 0.001*24*60*60, 'W': 0.001*24*60*60*7, 'M': 0.001*24*60*60*30, 'A-JUN': 0.001*24*60*60*365, 'A': 0.001*24*60*60*365}
temp_datasets = ['allo_ts', 'total_allo_ts', 'wap_allo_ts', 'usage_ts', 'metered_allo_ts']
#######################################
### Testing
# from_date = '2000-07-01'
# to_date = '2020-06-30'
#
# self = AlloUsage(from_date=from_date, to_date=to_date)
#
# results1 = self.get_ts(['allo', 'metered_allo', 'usage'], 'M', ['permit_id', 'wap'])
# results2 = self.get_ts(['usage'], 'D', ['wap'])
# results3 = self.get_ts(['allo', 'metered_allo', 'usage', 'usage_est'], 'M', ['permit_id', 'wap'])
# results3 = self.get_ts(['allo', 'metered_allo', 'usage', 'usage_est'], 'D', ['permit_id', 'wap'])
# wap_filter = {'wap': ['C44/0001']}
#
# self = AlloUsage(from_date=from_date, to_date=to_date, wap_filter=wap_filter)
#
# results1 = self.get_ts(['allo', 'metered_allo', 'usage'], 'M', ['permit_id', 'wap'])
# results2 = self.get_ts(['usage'], 'D', ['wap'])
# permit_filter = {'permit_id': ['200040']}
#
# self = AlloUsage(from_date=from_date, to_date=to_date, permit_filter=permit_filter)
#
# results1 = self.get_ts(['allo', 'metered_allo', 'usage', 'usage_est'], 'M', ['permit_id', 'wap'])
# results2 = self.get_ts(['allo', 'metered_allo', 'usage', 'usage_est'], 'D', ['permit_id', 'wap'])
########################################
### Core class
class AlloUsage(object):
"""
    Class to process the allocation and usage data in NZ.
Parameters
----------
from_date : str or None
The start date of the consent and the final time series. In the form of '2000-01-01'. None will return all consents and subsequently all dates.
to_date : str or None
The end date of the consent and the final time series. In the form of '2000-01-01'. None will return all consents and subsequently all dates.
permit_filter : dict
        If permit_filter is a list, it should contain the columns from the permit table to return. If it's a dict, the keys should be the column names and the values the filter on those columns.
wap_filter : dict
If wap_filter is a list, then it should represent the columns from the wap table that should be returned. If it's a dict, then the keys should be the column names and the values should be the filter on those columns.
only_consumptive : bool
Should only the consumptive takes be returned? Default True
include_hydroelectric : bool
Should hydro-electric takes be included? Default False
Returns
-------
AlloUsage object
with all of the base sites, allo, and allo_wap DataFrames
"""
dataset_types = dataset_types
# plot_group = pg
# plot_stacked = ps
_usage_remote = param['remote']['usage']
_permit_remote = param['remote']['permit']
### Initial import and assignment function
def __init__(self, from_date=None, to_date=None, permit_filter=None, wap_filter=None, only_consumptive=True, include_hydroelectric=False):
"""
Parameters
----------
from_date : str or None
The start date of the consent and the final time series. In the form of '2000-01-01'. None will return all consents and subsequently all dates.
to_date : str or None
The end date of the consent and the final time series. In the form of '2000-01-01'. None will return all consents and subsequently all dates.
permit_filter : dict
            If permit_filter is a list, it should contain the columns from the permit table to return. If it's a dict, the keys should be the column names and the values the filter on those columns.
wap_filter : dict
If wap_filter is a list, then it should represent the columns from the wap table that should be returned. If it's a dict, then the keys should be the column names and the values should be the filter on those columns.
only_consumptive : bool
Should only the consumptive takes be returned? Default True
include_hydroelectric : bool
Should hydro-electric takes be included? Default False
Returns
-------
AlloUsage object
with all of the base sites, allo, and allo_wap DataFrames
"""
permits0 = get_permit_data(self._permit_remote['connection_config'], self._permit_remote['bucket'], self._permit_remote['permits_key'])
waps, permits = allo_filter(permits0, from_date, to_date, permit_filter=permit_filter, wap_filter=wap_filter, only_consumptive=only_consumptive, include_hydroelectric=include_hydroelectric)
if from_date is None:
from_date1 = pd.Timestamp('1900-07-01')
else:
from_date1 = pd.Timestamp(from_date)
if to_date is None:
to_date1 = pd.Timestamp.now().floor('D')
else:
to_date1 = pd.Timestamp(to_date)
setattr(self, 'waps', waps)
setattr(self, 'permits', permits)
# setattr(self, 'sd', sd)
setattr(self, 'from_date', from_date1)
setattr(self, 'to_date', to_date1)
def _est_allo_ts(self):
"""
"""
### Run the allocation time series creation
### This has currently been hard-coded to only use the max rate. This should probably be changed once the permitting data gets fixed.
limit_col = allo_type_dict[self.freq]
# multiplier = allo_mult_dict[self.freq]
# limit_col = 'max_rate'
allo4 = allo_ts(self.permits, self.from_date, self.to_date, self.freq, limit_col).round()
allo4.name = 'total_allo'
# allo4 = (allo4 * multiplier).round()
# if self.irr_season and ('A' not in self.freq):
# dates1 = allo4.index.levels[2]
# dates2 = dates1[dates1.month.isin([10, 11, 12, 1, 2, 3, 4])]
# allo4 = allo4.loc[(slice(None), slice(None), dates2)]
setattr(self, 'total_allo_ts', allo4.reset_index())
def _allo_wap_spit(self):
"""
"""
allo6 = pd.merge(self.total_allo_ts, self.waps[['permit_id', 'wap', 'sd_ratio']], on=['permit_id'])
# allo6 = pd.merge(allo5, self.sd, on=['permit_id', 'wap'], how='left')
allo6['combo_wap_allo'] = allo6.groupby(['permit_id', 'hydro_feature', 'date'])['total_allo'].transform('sum')
allo6['combo_wap_ratio'] = allo6['total_allo']/allo6['combo_wap_allo']
allo6['wap_allo'] = allo6['total_allo'] * allo6['combo_wap_ratio']
allo7 = allo6.drop(['combo_wap_allo', 'combo_wap_ratio', 'total_allo'], axis=1).rename(columns={'wap_allo': 'total_allo'}).copy()
## Calculate the stream depletion
allo7.loc[allo7.sd_ratio.isnull() & (allo7.hydro_feature == 'groundwater'), 'sd_ratio'] = 0
allo7.loc[allo7.sd_ratio.isnull() & (allo7.hydro_feature == 'surface water'), 'sd_ratio'] = 1
allo7['sw_allo'] = allo7['total_allo'] * allo7['sd_ratio']
allo7['gw_allo'] = allo7['total_allo'] - allo7['sw_allo']
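# Illustrative numbers (not from the data): a take with total_allo = 100 and
# sd_ratio = 0.3 is split into sw_allo = 30 and gw_allo = 70; takes with a missing
# sd_ratio fall back to 0 (groundwater) or 1 (surface water) as set above.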
allo8 = allo7.drop(['hydro_feature', 'sd_ratio'], axis=1).groupby(pk).mean()
setattr(self, 'wap_allo_ts', allo8)
def _get_allo_ts(self):
"""
Function to create an allocation time series.
"""
if not hasattr(self, 'total_allo_ts'):
self._est_allo_ts()
### Convert to GW and SW allocation
self._allo_wap_spit()
def _process_usage(self):
"""
"""
if not hasattr(self, 'wap_allo_ts'):
self._get_allo_ts()
allo1 = self.wap_allo_ts.copy().reset_index()
waps = allo1.wap.unique().tolist()
## Get the ts data and aggregate
if hasattr(self, 'usage_ts_daily'):
tsdata1 = self.usage_ts_daily
else:
tsdata1, stns_waps = get_usage_data(self._usage_remote['connection_config'], self._usage_remote['bucket'], waps, self.from_date, self.to_date)
tsdata1.rename(columns={'water_use': 'total_usage', 'time': 'date'}, inplace=True)
tsdata1 = tsdata1[['wap', 'date', 'total_usage']].copy()
## filter - remove individual spikes and negative values
tsdata1.loc[tsdata1['total_usage'] < 0, 'total_usage'] = 0
def remove_spikes(x):
val1 = bool(x[1] > (x[0] + x[2] + 2))
if val1:
return (x[0] + x[2])/2
else:
return x[1]
tsdata1.iloc[1:-1, 2] = tsdata1['total_usage'].rolling(3, center=True).apply(remove_spikes, raw=True).iloc[1:-1]
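# Illustrative check of the spike rule (assumed values, not part of the pipeline):
# a centred window [2.0, 9.0, 3.0] triggers because 9.0 > 2.0 + 3.0 + 2, so the
# middle value is replaced by (2.0 + 3.0) / 2 = 2.5, whereas [2.0, 6.0, 3.0] is
# left unchanged because 6.0 <= 7.0.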
setattr(self, 'usage_ts_daily', tsdata1)
## Convert station data to DataFrame
stns_waps1 = pd.DataFrame([{'wap': s['ref'], 'lon': s['geometry']['coordinates'][0], 'lat': s['geometry']['coordinates'][1]} for s in stns_waps])
setattr(self, 'waps_only', stns_waps1)
### Aggregate
tsdata2 = grp_ts_agg(tsdata1, 'wap', 'date', self.freq, 'sum')
setattr(self, 'usage_ts', tsdata2)
def _usage_estimation(self, usage_allo_ratio=2, buffer_dis=40000, min_months=36):
"""
"""
from gistools import vector
### Get the necessary data
a1 = AlloUsage()
a1.permits = self.permits.copy()
a1.waps = self.waps.copy()
# a1.from_date = self.from_date
# a1.to_date = self.to_date
allo_use1 = a1.get_ts(['allo', 'metered_allo', 'usage'], 'M', ['permit_id', 'wap'])
permits = self.permits.copy()
### Create Wap locations
waps1 = vector.xy_to_gpd('wap', 'lon', 'lat', self.waps.drop('permit_id', axis=1).drop_duplicates('wap'), 4326)
waps2 = waps1.to_crs(2193)
### Determine which Waps need to be estimated
allo_use_mis1 = allo_use1[allo_use1['total_metered_allo'] == 0].copy().reset_index()
allo_use_with1 = allo_use1[allo_use1['total_metered_allo'] > 0].copy().reset_index()
mis_waps1 = allo_use_mis1.groupby(['permit_id', 'wap'])['total_allo'].count().copy()
with_waps1 = allo_use_with1.groupby(['permit_id', 'wap'])['total_allo'].count()
with_waps2 = with_waps1[with_waps1 >= min_months]
with_waps3 = pd.merge(with_waps2.reset_index()[['permit_id', 'wap']], permits[['permit_id', 'use_type']], on='permit_id')
with_waps4 = pd.merge(waps2, with_waps3['wap'], on='wap')
mis_waps2 = pd.merge(mis_waps1.reset_index(), permits[['permit_id', 'use_type']], on='permit_id')
mis_waps3 = pd.merge(waps2, mis_waps2['wap'], on='wap')
mis_waps3['geometry'] = mis_waps3['geometry'].buffer(buffer_dis)
# mis_waps3.rename(columns={'wap': 'mis_wap'}, inplace=True)
mis_waps4, poly1 = vector.pts_poly_join(with_waps4.rename(columns={'wap': 'good_wap'}), mis_waps3, 'wap')
## Calc ratios
allo_use_with2 = pd.merge(allo_use_with1, permits[['permit_id', 'use_type']], on='permit_id')
allo_use_with2['month'] = allo_use_with2['date'].dt.month
allo_use_with2['usage_allo'] = allo_use_with2['total_usage']/allo_use_with2['total_allo']
allo_use_ratio1 = allo_use_with2.groupby(['permit_id', 'wap', 'use_type', 'month'])['usage_allo'].mean().reset_index()
allo_use_ratio2 = pd.merge(allo_use_ratio1.rename(columns={'wap': 'good_wap'}), mis_waps4[['good_wap', 'wap']], on='good_wap')
## Combine with the missing ones
allo_use_mis2 = pd.merge(allo_use_mis1[['permit_id', 'wap', 'date']], permits[['permit_id', 'use_type']], on='permit_id')
allo_use_mis2['month'] = allo_use_mis2['date'].dt.month
allo_use_mis3 = pd.merge(allo_use_mis2, allo_use_ratio2[['use_type', 'month', 'usage_allo', 'wap']], on=['use_type', 'wap', 'month'])
allo_use_mis4 = allo_use_mis3.groupby(['permit_id', 'wap', 'date'])['usage_allo'].mean().reset_index()
allo_use_mis5 = pd.merge(allo_use_mis4, allo_use_mis1[['permit_id', 'wap', 'date', 'total_allo', 'sw_allo', 'gw_allo']], on=['permit_id', 'wap', 'date'])
allo_use_mis5['total_usage_est'] = (allo_use_mis5['usage_allo'] * allo_use_mis5['total_allo']).round()
allo_use_mis5['sw_usage_est'] = (allo_use_mis5['usage_allo'] * allo_use_mis5['sw_allo']).round()
allo_use_mis5['gw_usage_est'] = allo_use_mis5['total_usage_est'] - allo_use_mis5['sw_usage_est']
allo_use_mis6 = allo_use_mis5[['permit_id', 'wap', 'date', 'total_usage_est', 'sw_usage_est', 'gw_usage_est']].copy()
### Convert to daily if required
if self.freq == 'D':
days1 = allo_use_mis6.date.dt.daysinmonth
days2 = pd.to_timedelta((days1/2).round().astype('int32'), unit='D')
allo_use_mis6['total_usage_est'] = allo_use_mis6['total_usage_est'] / days1
allo_use_mis6['sw_usage_est'] = allo_use_mis6['sw_usage_est'] / days1
allo_use_mis6['gw_usage_est'] = allo_use_mis6['gw_usage_est'] / days1
usage_rate0 = allo_use_mis6.copy()
usage_rate0['date'] = usage_rate0['date'] - days2
grp1 = allo_use_mis6.groupby(['permit_id', 'wap'])
first1 = grp1.first()
last1 = grp1.last()
first1.loc[:, 'date'] = pd.to_datetime(first1.loc[:, 'date'].dt.strftime('%Y-%m') + '-01')
usage_rate1 = pd.concat([first1, usage_rate0.set_index(['permit_id', 'wap']), last1], sort=True).reset_index().sort_values(['permit_id', 'wap', 'date'])
usage_rate1.set_index('date', inplace=True)
usage_daily_rate1 = usage_rate1.groupby(['permit_id', 'wap']).apply(lambda x: x.resample('D').interpolate(method='pchip')[['total_usage_est', 'sw_usage_est', 'gw_usage_est']]).round(2)
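# In other words, each monthly estimate is first converted to a per-day rate,
# anchored near the middle of its month (the half-month shift above), and then
# pchip-interpolated onto a daily index between the first and last months of each
# permit_id/wap pair.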
else:
usage_daily_rate1 = allo_use_mis6.set_index(['permit_id', 'wap', 'date'])
usage_daily_rate2 = usage_daily_rate1.loc[slice(None), slice(None), self.from_date:self.to_date]
setattr(self, 'usage_est', usage_daily_rate2)
return usage_daily_rate2
def _split_usage_ts(self, usage_allo_ratio=2):
"""
"""
### Get the usage data if it exists
if not hasattr(self, 'usage_ts'):
self._process_usage()
tsdata2 = self.usage_ts.copy().reset_index()
if not hasattr(self, 'wap_allo_ts'):
self._get_allo_ts()
allo1 = self.wap_allo_ts.copy().reset_index()
allo1['combo_allo'] = allo1.groupby(['wap', 'date'])['total_allo'].transform('sum')
allo1['combo_ratio'] = allo1['total_allo']/allo1['combo_allo']
### combine with consents info
usage1 = pd.merge(allo1, tsdata2, on=['wap', 'date'])
usage1['total_usage'] = usage1['total_usage'] * usage1['combo_ratio']
### Remove high outliers
usage1.loc[usage1['total_usage'] > (usage1['total_allo'] * usage_allo_ratio), 'total_usage'] = np.nan
### Split the GW and SW components
usage1['sw_ratio'] = usage1['sw_allo']/usage1['total_allo']
usage1['sw_usage'] = usage1['sw_ratio'] * usage1['total_usage']
usage1['gw_usage'] = usage1['total_usage'] - usage1['sw_usage']
usage1.loc[usage1['gw_usage'] < 0, 'gw_usage'] = 0
usage1.drop(['sw_allo', 'gw_allo', 'total_allo', 'combo_allo', 'combo_ratio', 'sw_ratio'], axis=1, inplace=True)
usage2 = usage1.dropna().groupby(pk).mean()
setattr(self, 'split_usage_ts', usage2)
def _get_metered_allo_ts(self, proportion_allo=True):
"""
"""
setattr(self, 'proportion_allo', proportion_allo)
### Get the allocation ts either total or metered
if not hasattr(self, 'wap_allo_ts'):
self._get_allo_ts()
allo1 = self.wap_allo_ts.copy().reset_index()
rename_dict = {'sw_allo': 'sw_metered_allo', 'gw_allo': 'gw_metered_allo', 'total_allo': 'total_metered_allo'}
### Combine the usage data to the allo data
if not hasattr(self, 'split_usage_ts'):
self._split_usage_ts()
allo2 = pd.merge(self.split_usage_ts.reset_index()[pk], allo1, on=pk, how='right', indicator=True)
## Re-categorise
allo2['_merge'] = allo2._merge.cat.rename_categories({'left_only': 2, 'right_only': 0, 'both': 1}).astype(int)
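# After this step _merge is an integer flag: 1 means the allocation row has matching
# usage data ('both'), 0 means allocation only ('right_only'), and 2 means usage with
# no matching allocation ('left_only').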
if proportion_allo:
allo2.loc[allo2._merge != 1, list(rename_dict.keys())] = 0
allo3 = allo2.drop('_merge', axis=1).copy()
else:
allo2['usage_waps'] = allo2.groupby(['permit_id', 'date'])['_merge'].transform('sum')
allo2.loc[allo2.usage_waps == 0, list(rename_dict.keys())] = 0
allo3 = allo2.drop(['_merge', 'usage_waps'], axis=1).copy()
allo3.rename(columns=rename_dict, inplace=True)
allo4 = allo3.groupby(pk).mean()
if 'total_metered_allo' in allo3:
setattr(self, 'metered_allo_ts', allo4)
else:
setattr(self, 'metered_restr_allo_ts', allo4)
def get_ts(self, datasets, freq, groupby, usage_allo_ratio=2, buffer_dis=40000, min_months=36):
"""
Function to create a time series of allocation and usage.
Parameters
----------
datasets : list of str
The dataset types to be returned. Must be one or more of {ds}.
freq : str
Pandas time frequency code for the time interval. Must be one of 'D', 'W', 'M', 'A', or 'A-JUN'.
groupby : list of str
The fields that should be grouped by when returned. Can be any variety of fields including crc, take_type, allo_block, 'wap', CatchmentGroupName, etc. Date will always be included as part of the output group, so it doesn't need to be specified in the groupby.
usage_allo_ratio : int or float
The cut off ratio of usage/allocation. Any usage above this ratio will be removed from the results (subsequently reducing the metered allocation).
Returns
-------
DataFrame
Indexed by the groupby (and date)
"""
### Add in date to groupby if it's not there
if 'date' not in groupby:
groupby.append('date')
### Check the dataset types
if not np.in1d(datasets, self.dataset_types).all():
raise ValueError('datasets must be a list that includes one or more of ' + str(self.dataset_types))
### Check new to old parameters and remove attributes if necessary
if 'A' in freq:
freq_agg = freq
freq = 'M'
else:
freq_agg = freq
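# Annual requests are built from monthly results: the working frequency stays 'M',
# and freq_agg ('A' or 'A-JUN') is only applied in the final aggregation below.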
if hasattr(self, 'freq'):
# if (self.freq != freq) or (self.sd_days != sd_days) or (self.irr_season != irr_season):
if (self.freq != freq):
for d in temp_datasets:
if hasattr(self, d):
delattr(self, d)
### Assign parameters
setattr(self, 'freq', freq)
# setattr(self, 'sd_days', sd_days)
# setattr(self, 'irr_season', irr_season)
### Get the results and combine
all1 = []
if 'allo' in datasets:
self._get_allo_ts()
all1.append(self.wap_allo_ts)
if 'metered_allo' in datasets:
self._get_metered_allo_ts()
all1.append(self.metered_allo_ts)
if 'usage' in datasets:
self._split_usage_ts(usage_allo_ratio)
all1.append(self.split_usage_ts)
if 'usage_est' in datasets:
usage_est = self._usage_estimation(usage_allo_ratio, buffer_dis, min_months)
all1.append(usage_est)
if 'A' in freq_agg:
all2 = grp_ts_agg(pd.concat(all1, axis=1).reset_index(), ['permit_id', 'wap'], 'date', freq_agg, 'sum').reset_index()
else:
all2 = pd.concat(all1, axis=1).reset_index()
if not np.in1d(groupby, pk).all():
all2 = self._merge_extra(all2, groupby)
all3 = all2.groupby(groupby).sum()
all3.name = 'results'
return all3
def _merge_extra(self, data, cols):
"""
"""
# sites_col = [c for c in cols if c in self.waps.columns]
allo_col = [c for c in cols if c in self.permits.columns]
data1 = data.copy()
# if sites_col:
# all_sites_col = ['wap']
# all_sites_col.extend(sites_col)
# data1 = pd.merge(data1, self.waps.reset_index()[all_sites_col], on='wap')
if allo_col:
all_allo_col = ['permit_id']
all_allo_col.extend(allo_col)
data1 = | pd.merge(data1, self.permits[all_allo_col], on=all_allo_col) | pandas.merge |
'''
Created on 2017. 6. 29.
@author: Summit
'''
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
num_points = 2000
vectors_set = []
for i in range(num_points):
if np.random.random() > 0.5:
vectors_set.append([np.random.normal(0.0, 0.9),
np.random.normal(0.0,0.9)])
else:
vectors_set.append([np.random.normal(3.0,0.5),
np.random.normal(1.0, 0.5)])
df = pd.DataFrame({"x": [v[0] for v in vectors_set],
"y": [v[1] for v in vectors_set]})
sns.lmplot("x","y", data=df, fit_reg=False, size=6)
plt.show()
vectors = tf.constant(vectors_set)
k = 4
centroides = tf.Variable(tf.slice(tf.random_shuffle(vectors),
[0,0],[k,-1]))
expanded_vectors = tf.expand_dims(vectors, 0)
expanded_centroides = tf.expand_dims(centroides, 1)
print(expanded_vectors.get_shape())
print(expanded_centroides.get_shape())
assignments = tf.argmin(tf.reduce_sum(tf.square(tf.subtract(expanded_vectors,expanded_centroides)), 2), 0)
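# Shape bookkeeping for the broadcast above (2000 points of 2 dims, k = 4 centroids):
# expanded_vectors is (1, 2000, 2) and expanded_centroides is (4, 1, 2); their
# difference broadcasts to (4, 2000, 2), reduce_sum over axis 2 gives a (4, 2000)
# squared-distance matrix, and argmin over axis 0 picks the nearest centroid index
# for each of the 2000 points.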
means = tf.concat(axis=0, values=[
tf.reduce_mean(
tf.gather(vectors,
tf.reshape(
tf.where(
tf.equal(assignments, c)
), [1,-1])
), axis=[1])
for c in range(k)])
update_centroides = tf.assign(centroides, means)
init_op = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init_op)
for step in range(100):
_, centroid_values, assignment_values = sess.run([update_centroides,
centroides,
assignments])
print("centroies")
print(centroid_values)
data = {"x":[], "y":[], "cluster":[]}
for i in range(len(assignment_values)):
data["x"].append(vectors_set[i][0])
data["y"].append(vectors_set[i][1])
data["cluster"].append(assignment_values[i])
df = | pd.DataFrame(data) | pandas.DataFrame |
"""
V 2.2.1
"""
import argparse
import glob
import os
import random
import sys
import time
import tensorflow
from tensorflow.python.framework.errors_impl import FailedPreconditionError
import numpy as np
import pandas as pd
from sklearn.metrics import auc
from sklearn.metrics import precision_recall_curve, roc_curve
from tensorflow.keras.callbacks import Callback
from tensorflow.keras.callbacks import EarlyStopping
from ML.DDModel import DDModel
from ML.DDCallbacks import DDLogger
START_TIME = time.time()
print("Parsing args...")
parser = argparse.ArgumentParser()
parser.add_argument('-num_units','--nu',required=True)
parser.add_argument('-dropout','--df',required=True)
parser.add_argument('-learn_rate','--lr',required=True)
parser.add_argument('-bin_array','--ba',required=True)
parser.add_argument('-wt','--wt',required=True)
parser.add_argument('-cf','--cf',required=True)
parser.add_argument('-rec','--rec',required=True)
parser.add_argument('-n_it','--n_it',required=True)
parser.add_argument('-t_mol','--t_mol',required=True)
parser.add_argument('-bs','--bs',required=True)
parser.add_argument('-os','--os',required=True)
parser.add_argument('-d_path','--data_path',required=True) # new!
# adding parameter for where to save all the data to:
parser.add_argument('-s_path', '--save_path', required=False, default=None)
# allowing for variable number of molecules to validate and test from:
parser.add_argument('-n_mol', '--number_mol', required=False, default=1000000)
parser.add_argument('-t_n_mol', '--train_num_mol', required=False, default=-1)
parser.add_argument('-cont', '--continuous', required=False, action='store_true') # Using binary or continuous labels
parser.add_argument('-smile', '--smiles', required=False, action='store_true') # Use smiles instead of morgan fingerprints as input
parser.add_argument('-norm', '--normalization', required=False, action='store_false') # if continuous labels are used -> normalize them?
io_args = parser.parse_args()
print(sys.argv)
nu = int(io_args.nu)
df = float(io_args.df)
lr = float(io_args.lr)
ba = int(io_args.ba)
wt = float(io_args.wt)
cf = float(io_args.cf)
rec = float(io_args.rec)
n_it = int(io_args.n_it)
bs = int(io_args.bs)
oss = int(io_args.os)
t_mol = float(io_args.t_mol)
CONTINUOUS = io_args.continuous
NORMALIZE = io_args.normalization
SMILES = io_args.smiles
TRAINING_SIZE = int(io_args.train_num_mol)
num_molec = int(io_args.number_mol)
DATA_PATH = io_args.data_path # Now == file_path/protein
SAVE_PATH = io_args.save_path
# if no save path is provided we just save it in the same location as the data
if SAVE_PATH is None: SAVE_PATH = DATA_PATH
print(nu,df,lr,ba,wt,cf,bs,oss,DATA_PATH)
if TRAINING_SIZE == -1: print("Training size not specified, using entire dataset...")
print("Finished parsing args...")
def encode_smiles(series):
print("Encoding smiles")
# parameter is a pd.Series with ZINC_IDs as the indices and smiles as the elements
encoded_smiles = DDModel.process_smiles(series.values, 100, fit_range=100, use_padding=True, normalize=True)
encoded_dict = dict(zip(series.keys(), encoded_smiles))
# returns a dict array of the smiles.
return encoded_dict
def get_oversampled_smiles(Oversampled_zid, smiles_series):
# Must return a dictionary where the keys are the zids and the items are
# numpy ndarrays with n copies of the same encoded smile;
# the n comes from the number of times that particular zid was chosen at random.
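# e.g. (made-up entry) Oversampled_zid = {'ZINC000123': 3} yields an array with three
# identical copies of that molecule's encoded smile, stacked along axis 0.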
oversampled_smiles = {}
encoded_smiles = encode_smiles(smiles_series)
for key in Oversampled_zid.keys():
smile = encoded_smiles[key]
oversampled_smiles[key] = np.repeat([smile], Oversampled_zid[key], axis=0)
return oversampled_smiles
def get_oversampled_morgan(Oversampled_zid, fname):
print('x data from:', fname)
# Gets only the morgan fingerprints of those randomly selected zinc ids
with open(fname,'r') as ref:
for line in ref:
tmp=line.rstrip().split(',')
# only extracting those that were randomly selected
if (tmp[0] in Oversampled_zid.keys()) and (type(Oversampled_zid[tmp[0]]) != np.ndarray):
train_set = np.zeros([1,1024])
on_bit_vector = tmp[1:]
for elem in on_bit_vector:
train_set[0,int(elem)] = 1
# creates a n x 1024 numpy ndarray where n is the number of times that zinc id was randomly selected
Oversampled_zid[tmp[0]] = np.repeat(train_set, Oversampled_zid[tmp[0]], axis=0)
return Oversampled_zid
def get_morgan_and_scores(morgan_path, ID_labels):
# ID_labels is a dataframe containing the zincIDs and their corresponding scores.
train_set = np.zeros([num_molec,1024], dtype=bool) # using bool to save space
train_id = []
print('x data from:', morgan_path)
with open(morgan_path,'r') as ref:
line_no=0
for line in ref:
if line_no >= num_molec:
break
mol_info=line.rstrip().split(',')
train_id.append(mol_info[0])
# "Decompressing" the information from the file about where the 1s are on the 1024 bit vector.
bit_indicies = mol_info[1:] # array of indexes of the binary 1s in the 1024 bit vector representing the morgan fingerprint
for elem in bit_indicies:
train_set[line_no,int(elem)] = 1
line_no+=1
train_set = train_set[:line_no,:]
print('Done...')
train_pd = pd.DataFrame(data=train_set, dtype=np.uint8)
train_pd['ZINC_ID'] = train_id
ID_labels = ID_labels.to_frame()
print(ID_labels.columns)
score_col = ID_labels.columns.difference(['ZINC_ID'])[0]
print(score_col)
train_data = pd.merge(ID_labels, train_pd, how='inner',on=['ZINC_ID'])
X_train = train_data[train_data.columns.difference(['ZINC_ID', score_col])].values # input
y_train = train_data[[score_col]].values # labels
return X_train, y_train
# Gets the labels data
def get_data(smiles_path, morgan_path, labels_path):
# Loading the docking scores (with corresponding Zinc_IDs)
labels = pd.read_csv(labels_path, sep=',', header=0)
# Merging and setting index to the ID if smiles flag is set
if SMILES:
smiles = pd.read_csv(smiles_path, sep=' ', names=['smile', 'ZINC_ID'])
data = smiles.merge(labels, on='ZINC_ID')
else:
morgan = pd.read_csv(morgan_path, usecols=[0], header=0, names=['ZINC_ID']) # reading in only the zinc ids
data = morgan.merge(labels, on='ZINC_ID')
data.set_index('ZINC_ID', inplace=True)
return data
n_iteration = n_it
total_mols = t_mol
try:
os.mkdir(SAVE_PATH + '/iteration_'+str(n_iteration)+'/all_models')
except OSError:
pass
# Getting data from prev iterations and this iteration
data_from_prev = pd.DataFrame()
train_data = pd.DataFrame()
test_data = | pd.DataFrame() | pandas.DataFrame |
import inspect
import os
from unittest.mock import MagicMock, patch
import numpy as np
import pandas as pd
import pytest
import woodwork as ww
from evalml.model_understanding.graphs import visualize_decision_tree
from evalml.pipelines.components import ComponentBase
from evalml.utils.gen_utils import (
SEED_BOUNDS,
_convert_to_woodwork_structure,
_convert_woodwork_types_wrapper,
_rename_column_names_to_numeric,
classproperty,
convert_to_seconds,
drop_rows_with_nans,
get_importable_subclasses,
get_random_seed,
import_or_raise,
infer_feature_types,
jupyter_check,
pad_with_nans,
save_plot
)
@patch('importlib.import_module')
def test_import_or_raise_errors(dummy_importlib):
def _mock_import_function(library_str):
if library_str == "_evalml":
raise ImportError("Mock ImportError executed!")
if library_str == "attr_error_lib":
raise Exception("Mock Exception executed!")
dummy_importlib.side_effect = _mock_import_function
with pytest.raises(ImportError, match="Missing optional dependency '_evalml'"):
import_or_raise("_evalml")
with pytest.raises(ImportError, match="Missing optional dependency '_evalml'. Please use pip to install _evalml. Additional error message"):
import_or_raise("_evalml", "Additional error message")
with pytest.raises(Exception, match="An exception occurred while trying to import `attr_error_lib`: Mock Exception executed!"):
import_or_raise("attr_error_lib")
def test_import_or_raise_imports():
math = import_or_raise("math", "error message")
assert math.ceil(0.1) == 1
def test_convert_to_seconds():
assert convert_to_seconds("10 s") == 10
assert convert_to_seconds("10 sec") == 10
assert convert_to_seconds("10 second") == 10
assert convert_to_seconds("10 seconds") == 10
assert convert_to_seconds("10 m") == 600
assert convert_to_seconds("10 min") == 600
assert convert_to_seconds("10 minute") == 600
assert convert_to_seconds("10 minutes") == 600
assert convert_to_seconds("10 h") == 36000
assert convert_to_seconds("10 hr") == 36000
assert convert_to_seconds("10 hour") == 36000
assert convert_to_seconds("10 hours") == 36000
with pytest.raises(AssertionError, match="Invalid unit."):
convert_to_seconds("10 years")
def test_get_random_seed_rng():
def make_mock_random_state(return_value):
class MockRandomState(np.random.RandomState):
def __init__(self):
self.min_bound = None
self.max_bound = None
super().__init__()
def randint(self, min_bound, max_bound):
self.min_bound = min_bound
self.max_bound = max_bound
return return_value
return MockRandomState()
rng = make_mock_random_state(42)
assert get_random_seed(rng) == 42
assert rng.min_bound == SEED_BOUNDS.min_bound
assert rng.max_bound == SEED_BOUNDS.max_bound
def test_get_random_seed_int():
# ensure the invariant "min_bound < max_bound" is enforced
with pytest.raises(ValueError):
get_random_seed(0, min_bound=0, max_bound=0)
with pytest.raises(ValueError):
get_random_seed(0, min_bound=0, max_bound=-1)
# test default boundaries to show the provided value should modulate within the default range
assert get_random_seed(SEED_BOUNDS.max_bound - 2) == SEED_BOUNDS.max_bound - 2
assert get_random_seed(SEED_BOUNDS.max_bound - 1) == SEED_BOUNDS.max_bound - 1
assert get_random_seed(SEED_BOUNDS.max_bound) == SEED_BOUNDS.min_bound
assert get_random_seed(SEED_BOUNDS.max_bound + 1) == SEED_BOUNDS.min_bound + 1
assert get_random_seed(SEED_BOUNDS.max_bound + 2) == SEED_BOUNDS.min_bound + 2
assert get_random_seed(SEED_BOUNDS.min_bound - 2) == SEED_BOUNDS.max_bound - 2
assert get_random_seed(SEED_BOUNDS.min_bound - 1) == SEED_BOUNDS.max_bound - 1
assert get_random_seed(SEED_BOUNDS.min_bound) == SEED_BOUNDS.min_bound
assert get_random_seed(SEED_BOUNDS.min_bound + 1) == SEED_BOUNDS.min_bound + 1
assert get_random_seed(SEED_BOUNDS.min_bound + 2) == SEED_BOUNDS.min_bound + 2
# vectorize get_random_seed via a wrapper for easy evaluation
default_min_bound = inspect.signature(get_random_seed).parameters['min_bound'].default
default_max_bound = inspect.signature(get_random_seed).parameters['max_bound'].default
assert default_min_bound == SEED_BOUNDS.min_bound
assert default_max_bound == SEED_BOUNDS.max_bound
def get_random_seed_vec(min_bound=None, max_bound=None): # passing None for either means no value is provided to get_random_seed
def get_random_seed_wrapper(random_seed):
return get_random_seed(random_seed,
min_bound=min_bound if min_bound is not None else default_min_bound,
max_bound=max_bound if max_bound is not None else default_max_bound)
return np.vectorize(get_random_seed_wrapper)
# ensure that regardless of the setting of min_bound and max_bound, the output of get_random_seed always stays
# between the min_bound (inclusive) and max_bound (exclusive), and wraps neatly around that range using modular arithmetic.
vals = np.arange(-100, 100)
def make_expected_values(vals, min_bound, max_bound):
return np.array([i if (min_bound <= i and i < max_bound) else ((i - min_bound) % (max_bound - min_bound)) + min_bound
for i in vals])
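# e.g. with min_bound=0 and max_bound=5, a seed of 7 maps to (7 - 0) % (5 - 0) + 0 == 2,
# which is the wrap-around behaviour the assertions below check against get_random_seed.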
np.testing.assert_equal(get_random_seed_vec(min_bound=None, max_bound=None)(vals),
make_expected_values(vals, min_bound=SEED_BOUNDS.min_bound, max_bound=SEED_BOUNDS.max_bound))
np.testing.assert_equal(get_random_seed_vec(min_bound=None, max_bound=10)(vals),
make_expected_values(vals, min_bound=SEED_BOUNDS.min_bound, max_bound=10))
np.testing.assert_equal(get_random_seed_vec(min_bound=-10, max_bound=None)(vals),
make_expected_values(vals, min_bound=-10, max_bound=SEED_BOUNDS.max_bound))
np.testing.assert_equal(get_random_seed_vec(min_bound=0, max_bound=5)(vals),
make_expected_values(vals, min_bound=0, max_bound=5))
np.testing.assert_equal(get_random_seed_vec(min_bound=-5, max_bound=0)(vals),
make_expected_values(vals, min_bound=-5, max_bound=0))
np.testing.assert_equal(get_random_seed_vec(min_bound=-5, max_bound=5)(vals),
make_expected_values(vals, min_bound=-5, max_bound=5))
np.testing.assert_equal(get_random_seed_vec(min_bound=5, max_bound=10)(vals),
make_expected_values(vals, min_bound=5, max_bound=10))
np.testing.assert_equal(get_random_seed_vec(min_bound=-10, max_bound=-5)(vals),
make_expected_values(vals, min_bound=-10, max_bound=-5))
def test_class_property():
class MockClass:
name = "MockClass"
@classproperty
def caps_name(cls):
return cls.name.upper()
assert MockClass.caps_name == "MOCKCLASS"
def test_get_importable_subclasses_wont_get_custom_classes():
class ChildClass(ComponentBase):
pass
assert ChildClass not in get_importable_subclasses(ComponentBase)
@patch('importlib.import_module')
def test_import_or_warn_errors(dummy_importlib):
def _mock_import_function(library_str):
if library_str == "_evalml":
raise ImportError("Mock ImportError executed!")
if library_str == "attr_error_lib":
raise Exception("Mock Exception executed!")
dummy_importlib.side_effect = _mock_import_function
with pytest.warns(UserWarning, match="Missing optional dependency '_evalml'"):
import_or_raise("_evalml", warning=True)
with pytest.warns(UserWarning, match="Missing optional dependency '_evalml'. Please use pip to install _evalml. Additional error message"):
import_or_raise("_evalml", "Additional error message", warning=True)
with pytest.warns(UserWarning, match="An exception occurred while trying to import `attr_error_lib`: Mock Exception executed!"):
import_or_raise("attr_error_lib", warning=True)
@patch('evalml.utils.gen_utils.import_or_raise')
def test_jupyter_check_errors(mock_import_or_raise):
mock_import_or_raise.side_effect = ImportError
assert not jupyter_check()
mock_import_or_raise.side_effect = Exception
assert not jupyter_check()
@patch('evalml.utils.gen_utils.import_or_raise')
def test_jupyter_check(mock_import_or_raise):
mock_import_or_raise.return_value = MagicMock()
mock_import_or_raise().core.getipython.get_ipython.return_value = True
assert jupyter_check()
mock_import_or_raise().core.getipython.get_ipython.return_value = False
assert not jupyter_check()
mock_import_or_raise().core.getipython.get_ipython.return_value = None
assert not jupyter_check()
def _check_equality(data, expected, check_index_type=True):
if isinstance(data, pd.Series):
pd.testing.assert_series_equal(data, expected, check_index_type=check_index_type)
else:
pd.testing.assert_frame_equal(data, expected, check_index_type=check_index_type)
@pytest.mark.parametrize("data,num_to_pad,expected",
[(pd.Series([1, 2, 3]), 1, pd.Series([np.nan, 1, 2, 3])),
(pd.Series([1, 2, 3]), 0, pd.Series([1, 2, 3])),
(pd.Series([1, 2, 3, 4], index=pd.date_range("2020-10-01", "2020-10-04")),
2, pd.Series([np.nan, np.nan, 1, 2, 3, 4])),
(pd.DataFrame({"a": [1., 2., 3.], "b": [4., 5., 6.]}), 0,
pd.DataFrame({"a": [1., 2., 3.], "b": [4., 5., 6.]})),
(pd.DataFrame({"a": [4, 5, 6], "b": ["a", "b", "c"]}), 1,
pd.DataFrame({"a": [np.nan, 4, 5, 6], "b": [np.nan, "a", "b", "c"]})),
(pd.DataFrame({"a": [1, 0, 1]}), 2,
pd.DataFrame({"a": [np.nan, np.nan, 1, 0, 1]}))])
def test_pad_with_nans(data, num_to_pad, expected):
padded = pad_with_nans(data, num_to_pad)
_check_equality(padded, expected)
def test_pad_with_nans_with_series_name():
name = "data to pad"
data = pd.Series([1, 2, 3], name=name)
padded = pad_with_nans(data, 1)
_check_equality(padded, pd.Series([np.nan, 1, 2, 3], name=name))
@pytest.mark.parametrize("data, expected",
[([pd.Series([None, 1., 2., 3]), pd.DataFrame({"a": [1., 2., 3, None]})],
[pd.Series([1., 2.], index=pd.Int64Index([1, 2])),
pd.DataFrame({"a": [2., 3.]}, index=pd.Int64Index([1, 2]))]),
([pd.Series([None, 1., 2., 3]), pd.DataFrame({"a": [3., 4., None, None]})],
[pd.Series([1.], index= | pd.Int64Index([1]) | pandas.Int64Index |
#!/usr/bin/env python
"""
FUNCTION:
USAGE:
Copyright (c) 2017, <NAME> <<EMAIL>>
Permission to use, copy, modify, and/or distribute this software for
any purpose with or without fee is hereby granted, provided that the
above copyright notice and this permission notice appear in all
copies.
THE SOFTWARE IS PROVIDED 'AS IS' AND THE AUTHOR DISCLAIMS ALL
WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
PERFORMANCE OF THIS SOFTWARE.
"""
__description__ = ""
__author__ = "<NAME> <<EMAIL>>"
__copyright__ = "2017 by <NAME> <<EMAIL>>"
__license__ = "ISC license"
__email__ = "<EMAIL>"
__version__ = ""
from collections import defaultdict
import pandas as pd
class Ploidy:
def __init__(self,
hic_pro_matrix_values=None,
hic_pro_matrix_coordinates=None,
ploidy_file=None,
output_matrix_values=None,
output_matrix_coordinates=None,
bin_size=None):
self._hic_pro_matrix_values = hic_pro_matrix_values
self._hic_pro_matrix_coordinates = hic_pro_matrix_coordinates
self._ploidy_file = ploidy_file
self._output_matrix_values = output_matrix_values
self._output_matrix_coordinates = output_matrix_coordinates
self._bin_size = bin_size
def main(self):
factor_table = pd.read_table(self._ploidy_file)
interaction_matrix = self._read_hic_pro_matrix(
self._hic_pro_matrix_values,
self._hic_pro_matrix_coordinates)
interaction_matrix.set_index("Regions", inplace=True)
ploidy_patches, ploidy_factors = self._contruct_bin_patches(
interaction_matrix, factor_table, self._bin_size)
for overlapping_bins, ploidy_factor in zip(ploidy_patches, ploidy_factors):
interaction_matrix.loc[
overlapping_bins, overlapping_bins] = interaction_matrix.loc[
overlapping_bins, overlapping_bins] * ploidy_factor
self._write_matrix_in_hic_pro_format(
interaction_matrix, self._bin_size,
self._output_matrix_coordinates, self._output_matrix_values)
def _contruct_bin_patches(self, interaction_matrix, factor_table, bin_size):
"""Look for overlaps of the given annotations with bins. A bin needs
to overlap only partially with an annotated region.
--------------------------- Annotation
==== ==== ==== Bin accepted as overlapping
"""
# Build matrix of bin position to make querying easy
bin_pos_matrix = pd.DataFrame(index=interaction_matrix.index)
bin_pos_matrix["HiCMatrix"] = interaction_matrix["HiCMatrix"]
bin_pos_matrix["chrom"] = bin_pos_matrix["HiCMatrix"].apply(
lambda bin_name: "-".join(bin_name.split("-")[:-1]))
bin_pos_matrix["start"] = bin_pos_matrix["HiCMatrix"].apply(
lambda bin_name: int(bin_name.split("-")[-1]) + 1)
bin_pos_matrix["end"] = bin_pos_matrix["start"] + bin_size - 1
bin_pos_matrix["start"] = bin_pos_matrix["start"].astype(int)
bin_pos_matrix["end"] = bin_pos_matrix["end"].astype(int)
ploidy_patches = [] # Will become a list of lists
ploidy_factors = factor_table["ploidy_factor"].tolist()
for index, patch in factor_table.iterrows():
overlapping_bins = bin_pos_matrix[
(bin_pos_matrix["chrom"] == patch["chrom"])
&
(
(
(bin_pos_matrix["start"] >= patch["start"])
&
(bin_pos_matrix["start"] <= patch["stop"])
)
|
(
(bin_pos_matrix["end"] >= patch["start"])
&
(bin_pos_matrix["end"] <= patch["stop"])
)
)
]
ploidy_patches.append(overlapping_bins["HiCMatrix"].tolist())
assert len(ploidy_patches) == len(ploidy_factors)
return(ploidy_patches, ploidy_factors)
def _write_matrix_in_hic_pro_format(self,
interaction_matrix,
bin_size,
output_matrix_coordinates,
output_matrix_values):
coordinates = pd.DataFrame()
coordinates["Regions"] = interaction_matrix.index
coordinates["Chrom_name"] = coordinates["Regions"].apply(
lambda bin_name: "-".join(bin_name.split("-")[:-1]))
coordinates["Start_pos"] = coordinates["Regions"].apply(
lambda bin_name: int(bin_name.split("-")[-1]))
coordinates["End_pos"] = coordinates["Start_pos"].apply(
lambda start_pos: start_pos + bin_size)
coordinates["Id"] = [start_pos + 1 for start_pos in range(
len(coordinates["Regions"]))]
del coordinates["Regions"]
coordinates.to_csv(
output_matrix_coordinates, index=False, sep="\t", header=False)
# Generate a non-redundant comparison list by going through the matrix
# like this:
# |
# ||
# |||
# ||||
# |||||
# ||||||
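# For a 3-bin matrix the loop below writes the (column, row) pairs
# (1,1), (1,2), (1,3), (2,2), (2,3), (3,3), i.e. one half of the symmetric matrix,
# skipping any pair whose rounded interaction value is 0.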
del interaction_matrix["HiCMatrix"]
start_row_number = 0
with open(output_matrix_values, "w") as output_fh:
for column_number, bin_name in enumerate(interaction_matrix.columns):
for row_number in range(
start_row_number, len(interaction_matrix.columns)):
interaction_value = int(round(interaction_matrix.iloc[
row_number, column_number]))
if interaction_value == 0:
continue
output_fh.write("{}\t{}\t{}\n".format(
column_number + 1, row_number + 1,
interaction_value))
start_row_number += 1
def _read_hic_pro_matrix(self, matrix_values_file, matrix_coordinates_file):
pair_value_table = pd.read_table(
matrix_values_file, names=["bin_a", "bin_b", "counting"])
binning_information = pd.read_table(
matrix_coordinates_file, names=["replicon", "start", "end", "bin_id"])
bin_id_to_name = dict([
(bin_id, "-".join([replicon, str(start)]))
for bin_id, replicon, start in zip(
binning_information.bin_id,
binning_information.replicon,
binning_information.start)])
bin_pair_to_value = defaultdict(dict)
for bin_a, bin_b, value in zip(
pair_value_table["bin_a"], pair_value_table["bin_b"],
pair_value_table["counting"]):
bin_pair_to_value[bin_a][bin_b] = value
bin_pair_to_value[bin_b][bin_a] = value
result_matrix = | pd.DataFrame() | pandas.DataFrame |
import definitions
from src.Swell import Swell
from src.DAO import DAO
import pandas as pd
import os
from datetime import datetime
class SwellDAO(DAO):
def __init__(self):
DAO.__init__(self)
def create_table(self):
cursor = self.connection.cursor()
cursor.execute("""
CREATE TABLE IF NOT EXISTS `surfdb`.`SwellStaging`(
`SwellStagingKey` BIGINT NOT NULL AUTO_INCREMENT,
`ReportGenerationDate` DATE,
`GenerationHour` INT,
`ReportForecastDate` DATE,
`ForecastHour` INT,
`TideHeight` DOUBLE,
`SurfMin` DOUBLE,
`SurfMax` DOUBLE,
`SurfOptimalScore` DOUBLE,
`WindDirection` DOUBLE,
`WindSpeed` DOUBLE,
`WindGust` DOUBLE,
`Temperature` MEDIUMINT,
`Swell1Height` DOUBLE,
`Swell1Direction` DOUBLE,
`Swell1SwellMinDirection` DOUBLE,
`Swell1Period` DOUBLE,
`Swell1OptimalScore` DOUBLE,
`Swell2Height` DOUBLE,
`Swell2Direction` DOUBLE,
`Swell2SwellMinDirection` DOUBLE,
`Swell2Period` DOUBLE,
`Swell2OptimalScore` DOUBLE,
`Swell3Height` DOUBLE,
`Swell3Direction` DOUBLE,
`Swell3SwellMinDirection` DOUBLE,
`Swell3Period` DOUBLE,
`Swell3OptimalScore` DOUBLE,
`Swell4Height` DOUBLE,
`Swell4Direction` DOUBLE,
`Swell4SwellMinDirection` DOUBLE,
`Swell4Period` DOUBLE,
`Swell4OptimalScore` DOUBLE,
`Swell5Height` DOUBLE,
`Swell5Direction` DOUBLE,
`Swell5SwellMinDirection` DOUBLE,
`Swell5Period` DOUBLE,
`Swell5OptimalScore` DOUBLE,
`Swell6Height` DOUBLE,
`Swell6Direction` DOUBLE,
`Swell6SwellMinDirection` DOUBLE,
`Swell6Period` DOUBLE,
`Swell6OptimalScore` DOUBLE,
PRIMARY KEY (`SwellStagingKey`))
ENGINE = InnoDB;
""")
def show_tables(self):
cursor = self.connection.cursor()
cursor.execute("SHOW Tables")
return cursor.fetchall()
def drop_table_if_exists(self):
cursor = self.connection.cursor()
cursor.execute("DROP TABLE IF EXISTS `surfdb`.`SwellStaging`")
self.connection.commit()
def insert_csv_into_database(self, swell: Swell):
data_path = os.path.join(definitions.ROOT_DIR, swell.swell_csv_dir)
data = pd.read_csv(data_path, encoding='unicode_escape')
df = | pd.DataFrame(data) | pandas.DataFrame |
# csv data util
import pandas as pd
from tqdm import tqdm
class csvUtil():
def __init__(self, max_length=1000, encoding='utf8'):
self.max_length = max_length
self.encoding = encoding
def divide_1000_csv_data(self, filename):
with open(filename,"rb") as f:
data = pd.read_csv(f,encoding=self.encoding)
df = | pd.DataFrame(data) | pandas.DataFrame |
import pandas as pd
import pickle
import pathlib
path = pathlib.Path.cwd()
if path.stem == 'ATGC':
cwd = path
else:
cwd = list(path.parents)[::-1][path.parts.index('ATGC')]
def my_combine(col1, col2):
if col1 == col2:
return col1
else:
if pd.isna(col1):
return col2
else:
return col1
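# e.g. my_combine(np.nan, 'MSI-H') returns 'MSI-H' and my_combine('MSS', np.nan)
# keeps 'MSS'; when both columns agree, the shared value is passed through unchanged.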
##get the coad msi data
coad_msi = pd.read_csv(cwd / 'files' / 'msi_ground_truth' / 'TCGAbiolinks' / 'msi_COAD.csv', sep=',', low_memory=False, usecols=['bcr_patient_barcode', 'mononucleotide_and_dinucleotide_marker_panel_analysis_status'])
coad_msi.set_index('bcr_patient_barcode', inplace=True)
##get the stad cancers data
stad_msi = pd.read_csv(cwd / 'files' / 'msi_ground_truth' / 'TCGAbiolinks' / 'msi_STAD.csv', sep=',', low_memory=False, usecols=['bcr_patient_barcode', 'mononucleotide_and_dinucleotide_marker_panel_analysis_status'])
stad_msi.set_index('bcr_patient_barcode', inplace=True)
##outer join
coad_stad_joined = coad_msi.join(stad_msi, how='outer', rsuffix='stad')
coad_stad_combined = coad_stad_joined.iloc[:, 0].combine(coad_stad_joined.iloc[:, 1], my_combine)
combined_df = pd.DataFrame(coad_stad_combined)
##get the read cancers data
read_msi = pd.read_csv(cwd / 'files' / 'msi_ground_truth' / 'TCGAbiolinks' / 'msi_READ.csv', sep=',', low_memory=False, usecols=['bcr_patient_barcode', 'mononucleotide_and_dinucleotide_marker_panel_analysis_status'])
read_msi.set_index('bcr_patient_barcode', inplace=True)
coad_stad_read_joined = combined_df.join(read_msi, how='outer', rsuffix='read')
coad_stad_read_combined = coad_stad_read_joined.iloc[:, 0].combine(coad_stad_read_joined.iloc[:, 1], my_combine)
combined_df = pd.DataFrame(coad_stad_read_combined)
##get the esca cancers data
esca_msi = pd.read_csv(cwd / 'files' / 'msi_ground_truth' / 'TCGAbiolinks' / 'msi_ESCA.csv', sep=',', low_memory=False, usecols=['bcr_patient_barcode', 'mononucleotide_and_dinucleotide_marker_panel_analysis_status'])
esca_msi.set_index('bcr_patient_barcode', inplace=True)
coad_stad_read_esca_joined = combined_df.join(esca_msi, how='outer', rsuffix='esca')
coad_stad_read_esca_combined = coad_stad_read_esca_joined.iloc[:, 0].combine(coad_stad_read_esca_joined.iloc[:, 1], my_combine)
combined_df = pd.DataFrame(coad_stad_read_esca_combined)
##get the ucec cancers data
ucec_msi = pd.read_csv(cwd / 'files' / 'msi_ground_truth' / 'TCGAbiolinks' / 'msi_UCEC.csv', sep=',', low_memory=False, usecols=['bcr_patient_barcode', 'mononucleotide_and_dinucleotide_marker_panel_analysis_status'])
ucec_msi.set_index('bcr_patient_barcode', inplace=True)
coad_stad_read_esca_ucec_joined = combined_df.join(ucec_msi, how='outer', rsuffix='ucec')
coad_stad_read_esca_ucec_combined = coad_stad_read_esca_ucec_joined.iloc[:, 0].combine(coad_stad_read_esca_ucec_joined.iloc[:, 1], my_combine)
combined_df = pd.DataFrame(coad_stad_read_esca_ucec_combined)
##get the ucs cancers data
ucs_msi = | pd.read_csv(cwd / 'files' / 'msi_ground_truth' / 'TCGAbiolinks' / 'msi_UCS.csv', sep=',', low_memory=False, usecols=['bcr_patient_barcode', 'mononucleotide_and_dinucleotide_marker_panel_analysis_status']) | pandas.read_csv |